From 5b2692f91a8d776185c75beda3ac58ffb8560378 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Wed, 12 Feb 2025 14:20:38 -0800 Subject: [PATCH 001/550] Use dafault BM25Similarity (#17306) Signed-off-by: Prudhvi Godithi --- CHANGELOG-3.0.md | 1 + .../rest-api-spec/test/30_inner_hits.yml | 10 ++-- .../rest-api-spec/test/50_legacy_bm25.yml | 50 +++++++++++++++++++ .../basic/TransportTwoNodesSearchIT.java | 25 +++++----- .../search/nested/SimpleNestedIT.java | 2 +- .../index/similarity/SimilarityProviders.java | 12 ++++- .../index/similarity/SimilarityService.java | 5 +- .../similarity/SimilarityServiceTests.java | 8 +++ .../index/similarity/SimilarityTests.java | 34 ++++++++++--- 9 files changed, 119 insertions(+), 28 deletions(-) create mode 100644 modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/50_legacy_bm25.yml diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 652b5f05f52d6..abb4dd8d4baa2 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -36,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Refactor `:server` module `org.apacge.lucene` package to eliminate top level split packages for JPMS support ([#17241](https://github.com/opensearch-project/OpenSearch/pull/17241)) - Stop minimizing automata used for case-insensitive matches ([#17268](https://github.com/opensearch-project/OpenSearch/pull/17268)) - Refactor the `:server` module `org.opensearch.client` to `org.opensearch.transport.client` to eliminate top level split packages for JPMS support ([#17272](https://github.com/opensearch-project/OpenSearch/pull/17272)) +- Use Lucene `BM25Similarity` as default since the `LegacyBM25Similarity` is marked as deprecated ([#17306](https://github.com/opensearch-project/OpenSearch/pull/17306)) ### Deprecated diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml index 5ba4077beac46..4e3d079d648ef 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml @@ -79,11 +79,11 @@ teardown: - match: { hits.total.value: 2 } - - match: { hits.hits.0._id: "3" } - - match: { hits.hits.0.inner_hits.question.hits.total.value: 0} - - match: { hits.hits.1._id: "2" } - - match: { hits.hits.1.inner_hits.question.hits.total.value: 1} - - match: { hits.hits.1.inner_hits.question.hits.hits.0._id: "1"} + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.0.inner_hits.question.hits.total.value: 1 } + - match: { hits.hits.0.inner_hits.question.hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.1.inner_hits.question.hits.total.value: 0 } --- "HasParent disallow expensive queries": diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/50_legacy_bm25.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/50_legacy_bm25.yml new file mode 100644 index 0000000000000..e5b242685064b --- /dev/null +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/50_legacy_bm25.yml @@ -0,0 +1,50 @@ +--- +setup: + - do: + indices.create: + index: legacy_bm25_test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + similarity: + default: + type: LegacyBM25 + k1: 1.2 + b: 0.75 + mappings: + properties: + content: + type: text + - do: + index: + index: legacy_bm25_test + id: "1" + body: { "content": "This is a 
test document for legacy BM25 scoring" } + - do: + index: + index: legacy_bm25_test + id: "2" + body: { "content": "legacy legacy legacy scoring" } + - do: + indices.refresh: + index: legacy_bm25_test + +--- +"Legacy BM25 search": + - do: + search: + index: legacy_bm25_test + body: + query: + match: + content: "legacy" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "1" } + +--- +teardown: + - do: + indices.delete: + index: legacy_bm25_test diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java index b9f16a60d68df..cc88d399932c8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java @@ -73,6 +73,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.opensearch.transport.client.Requests.createIndexRequest; import static org.opensearch.transport.client.Requests.searchRequest; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; @@ -179,12 +180,12 @@ public void testDfsQueryThenFetch() throws Exception { SearchHit hit = hits[i]; assertThat(hit.getExplanation(), notNullValue()); assertThat(hit.getExplanation().getDetails().length, equalTo(1)); - assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(3)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails().length, equalTo(2)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getDescription(), startsWith("n,")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getValue(), equalTo(100L)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getDescription(), startsWith("N,")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getValue(), equalTo(100L)); + assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(2)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDescription(), startsWith("idf")); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails().length, equalTo(2)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[0].getValue(), equalTo(100L)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[1].getValue(), equalTo(100L)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDescription(), containsString("freq")); assertThat( "id[" + hit.getId() + "] -> " + hit.getExplanation().toString(), hit.getId(), @@ -221,12 +222,12 @@ public void testDfsQueryThenFetchWithSort() throws Exception { SearchHit hit = hits[i]; assertThat(hit.getExplanation(), notNullValue()); assertThat(hit.getExplanation().getDetails().length, equalTo(1)); - assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(3)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails().length, equalTo(2)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getDescription(), startsWith("n,")); - 
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getValue(), equalTo(100L)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getDescription(), startsWith("N,")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getValue(), equalTo(100L)); + assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(2)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDescription(), startsWith("idf")); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails().length, equalTo(2)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[0].getValue(), equalTo(100L)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[1].getValue(), equalTo(100L)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDescription(), containsString("freq")); assertThat("id[" + hit.getId() + "]", hit.getId(), equalTo(Integer.toString(total + i))); } total += hits.length; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index 9aad23cdb9544..6746a72329232 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -496,7 +496,7 @@ public void testExplainWithSingleDoc() throws Exception { assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(1L)); Explanation explanation = searchResponse.getHits().getHits()[0].getExplanation(); assertThat(explanation.getValue(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertThat(explanation.toString(), startsWith("0.36464313 = Score based on 2 child docs in range from 0 to 1")); + assertThat(explanation.toString(), startsWith("0.16574687 = Score based on 2 child docs in range from 0 to 1")); } public void testSimpleNestedSorting() throws Exception { diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java index 4fbd717b64496..3465632eee6da 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java +++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java @@ -35,6 +35,7 @@ import org.apache.lucene.search.similarities.AfterEffect; import org.apache.lucene.search.similarities.AfterEffectB; import org.apache.lucene.search.similarities.AfterEffectL; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BasicModel; import org.apache.lucene.search.similarities.BasicModelG; import org.apache.lucene.search.similarities.BasicModelIF; @@ -62,6 +63,7 @@ import org.apache.lucene.search.similarities.NormalizationH2; import org.apache.lucene.search.similarities.NormalizationH3; import org.apache.lucene.search.similarities.NormalizationZ; +import org.apache.lucene.search.similarities.Similarity; import org.opensearch.Version; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; @@ -271,13 +273,21 @@ static void assertSettingsIsSubsetOf(String type, Version version, Settings sett } } - public static LegacyBM25Similarity createBM25Similarity(Settings settings, Version indexCreatedVersion) { + public static Similarity 
createBM25Similarity(Settings settings, Version indexCreatedVersion) { assertSettingsIsSubsetOf("BM25", indexCreatedVersion, settings, "k1", "b", DISCOUNT_OVERLAPS); float k1 = settings.getAsFloat("k1", 1.2f); float b = settings.getAsFloat("b", 0.75f); boolean discountOverlaps = settings.getAsBoolean(DISCOUNT_OVERLAPS, true); + return new BM25Similarity(k1, b, discountOverlaps); + } + + public static Similarity createLegacyBM25Similarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("LegacyBM25", indexCreatedVersion, settings, "k1", "b", DISCOUNT_OVERLAPS); + float k1 = settings.getAsFloat("k1", 1.2f); + float b = settings.getAsFloat("b", 0.75f); + boolean discountOverlaps = settings.getAsBoolean(DISCOUNT_OVERLAPS, true); return new LegacyBM25Similarity(k1, b, discountOverlaps); } diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java index 203068e08c3ce..ba2cf81c9a624 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java @@ -37,6 +37,7 @@ import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.TermStatistics; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; @@ -52,7 +53,6 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; -import org.opensearch.lucene.similarity.LegacyBM25Similarity; import org.opensearch.script.ScriptService; import java.util.Collections; @@ -84,7 +84,7 @@ public final class SimilarityService extends AbstractIndexComponent { }; }); defaults.put("BM25", version -> { - final LegacyBM25Similarity similarity = SimilarityProviders.createBM25Similarity(Settings.EMPTY, version); + final Similarity similarity = new BM25Similarity(); return () -> similarity; }); defaults.put("boolean", version -> { @@ -100,6 +100,7 @@ public final class SimilarityService extends AbstractIndexComponent { ); }); builtIn.put("BM25", (settings, version, scriptService) -> SimilarityProviders.createBM25Similarity(settings, version)); + builtIn.put("LegacyBM25", (settings, version, scriptService) -> SimilarityProviders.createLegacyBM25Similarity(settings, version)); builtIn.put("boolean", (settings, version, scriptService) -> SimilarityProviders.createBooleanSimilarity(settings, version)); builtIn.put("DFR", (settings, version, scriptService) -> SimilarityProviders.createDfrSimilarity(settings, version)); builtIn.put("IB", (settings, version, scriptService) -> SimilarityProviders.createIBSimilarity(settings, version)); diff --git a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java index dbe4b4c7a2c30..7eb1b3b676cf3 100644 --- a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; +import 
org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.Similarity; import org.opensearch.Version; @@ -53,6 +54,13 @@ public void testDefaultSimilarity() { Settings settings = Settings.builder().build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); SimilarityService service = new SimilarityService(indexSettings, null, Collections.emptyMap()); + assertThat(service.getDefaultSimilarity(), instanceOf(BM25Similarity.class)); + } + + public void testLegacySimilarity() { + Settings settings = Settings.builder().put("index.similarity.default.type", "LegacyBM25").build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + SimilarityService service = new SimilarityService(indexSettings, null, Collections.emptyMap()); assertThat(service.getDefaultSimilarity(), instanceOf(LegacyBM25Similarity.class)); } diff --git a/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java b/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java index 247b31bc0e579..1fafa4739b8b4 100644 --- a/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java +++ b/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java @@ -33,6 +33,7 @@ package org.opensearch.index.similarity; import org.apache.lucene.search.similarities.AfterEffectL; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BasicModelG; import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.DFISimilarity; @@ -72,7 +73,7 @@ protected Collection> getPlugins() { public void testResolveDefaultSimilarities() { SimilarityService similarityService = createIndex("foo").similarityService(); - assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(LegacyBM25Similarity.class)); + assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(BM25Similarity.class)); assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); assertThat(similarityService.getSimilarity("default"), equalTo(null)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> similarityService.getSimilarity("classic")); @@ -83,7 +84,29 @@ public void testResolveDefaultSimilarities() { ); } - public void testResolveSimilaritiesFromMapping_classicIsForbidden() throws IOException { + public void testResolveLegacySimilarity() throws IOException { + Settings settings = Settings.builder() + .put("index.similarity.my_similarity.type", "LegacyBM25") + .put("index.similarity.my_similarity.k1", 1.2f) + .put("index.similarity.my_similarity.b", 0.75f) + .put("index.similarity.my_similarity.discount_overlaps", false) + .build(); + + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("dummy") + .field("type", "text") + .field("similarity", "my_similarity") + .endObject() + .endObject() + .endObject(); + + MapperService mapperService = createIndex("foo", settings, "type", mapping).mapperService(); + assertThat(mapperService.fieldType("dummy").getTextSearchInfo().getSimilarity().get(), instanceOf(LegacyBM25Similarity.class)); + } + + public void testResolveSimilaritiesFromMapping_classicIsForbidden() { Settings indexSettings = Settings.builder() .put("index.similarity.my_similarity.type", "classic") 
.put("index.similarity.my_similarity.discount_overlaps", false) @@ -114,12 +137,9 @@ public void testResolveSimilaritiesFromMapping_bm25() throws IOException { .put("index.similarity.my_similarity.discount_overlaps", false) .build(); MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); - assertThat(mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(), instanceOf(LegacyBM25Similarity.class)); + assertThat(mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(), instanceOf(BM25Similarity.class)); - LegacyBM25Similarity similarity = (LegacyBM25Similarity) mapperService.fieldType("field1") - .getTextSearchInfo() - .getSimilarity() - .get(); + BM25Similarity similarity = (BM25Similarity) mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(); assertThat(similarity.getK1(), equalTo(2.0f)); assertThat(similarity.getB(), equalTo(0.5f)); assertThat(similarity.getDiscountOverlaps(), equalTo(false)); From 5feb514f27e063eb6bf68c47dd939802bd0ca2d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Feb 2025 20:39:31 -0500 Subject: [PATCH 002/550] Bump reactor_netty from 1.1.26 to 1.1.27 (#17322) Signed-off-by: Andriy Redko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + gradle/libs.versions.toml | 2 +- .../licenses/reactor-netty-core-1.1.26.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.1.27.jar.sha1 | 1 + .../licenses/reactor-netty-http-1.1.26.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.1.27.jar.sha1 | 1 + .../licenses/reactor-netty-core-1.1.26.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.1.27.jar.sha1 | 1 + .../licenses/reactor-netty-http-1.1.26.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.1.27.jar.sha1 | 1 + 10 files changed, 6 insertions(+), 5 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.27.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.27.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.27.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.27.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index b6d28d37fde1f..739b3b09c1926 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.jruby.joni:joni` from 2.2.1 to 2.2.3 ([#17136](https://github.com/opensearch-project/OpenSearch/pull/17136)) - Bump `org.apache.ant:ant` from 1.10.14 to 1.10.15 ([#17288](https://github.com/opensearch-project/OpenSearch/pull/17288)) - Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) +- Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233)) diff --git a/gradle/libs.versions.toml 
b/gradle/libs.versions.toml index 32b8cb431afeb..8cd210bbcb65a 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -37,7 +37,7 @@ joda = "2.12.7" roaringbitmap = "1.3.0" # project reactor -reactor_netty = "1.1.26" +reactor_netty = "1.1.27" reactor = "3.5.20" # client dependencies diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1 deleted file mode 100644 index e64cc3645514f..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -05a8c6004161a4c1a9c0639b05387baab6efaa32 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.27.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.27.jar.sha1 new file mode 100644 index 0000000000000..3eac15e74ad19 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.27.jar.sha1 @@ -0,0 +1 @@ +094428d25b65a0bdb89d639934d39b7ba7f169ee \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1 deleted file mode 100644 index 035d2fb1c4c4c..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -41682e517e2808fc469d6b2b85fea48d0a7fe73b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.27.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.27.jar.sha1 new file mode 100644 index 0000000000000..7236917a621c7 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.27.jar.sha1 @@ -0,0 +1 @@ +56dab8976b8d79e37597e736d62bb4111cc28e9c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1 deleted file mode 100644 index e64cc3645514f..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -05a8c6004161a4c1a9c0639b05387baab6efaa32 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.27.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.27.jar.sha1 new file mode 100644 index 0000000000000..3eac15e74ad19 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.27.jar.sha1 @@ -0,0 +1 @@ +094428d25b65a0bdb89d639934d39b7ba7f169ee \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1 deleted file mode 100644 index 035d2fb1c4c4c..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -41682e517e2808fc469d6b2b85fea48d0a7fe73b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.27.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.27.jar.sha1 new file mode 100644 index 0000000000000..7236917a621c7 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.27.jar.sha1 @@ -0,0 +1 @@ +56dab8976b8d79e37597e736d62bb4111cc28e9c \ No newline at end of file From e34422bd838e92a55970399e2f273a8d1a25f496 Mon Sep 17 00:00:00 2001 From: 
"opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 07:07:44 -0500 Subject: [PATCH 003/550] Add bwc version 2.19.1 (#17333) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Andriy Redko Co-authored-by: opensearch-ci-bot <83309141+opensearch-ci-bot@users.noreply.github.com> --- .ci/bwcVersions | 3 ++- README.md | 2 +- libs/core/src/main/java/org/opensearch/Version.java | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 73b14fa56190e..8ddc5e5811d7a 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -43,4 +43,5 @@ BWC_VERSION: - "2.18.0" - "2.18.1" - "2.19.0" - - "2.20.0" \ No newline at end of file + - "2.19.1" + - "2.20.0" diff --git a/README.md b/README.md index 6ae7e12948670..bf9fef148df3e 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ [![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") [![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) [![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) -[![2.19.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.19.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.19.0") +[![2.19.1 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.19.1)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.19.1") [![2.18.1 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.18.1)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.18.1") [![3.0.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 339a162bb0a33..5adde08a40d54 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -114,6 +114,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_18_0 = new Version(2180099, org.apache.lucene.util.Version.LUCENE_9_12_0); public static final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_1); + public static final Version V_2_19_1 = new Version(2190199, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_2_20_0 = new Version(2200099, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_10_1_0); public static final Version CURRENT = V_3_0_0; From 
38e4b335f5182c0efefc0d88fee733805304f95b Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Thu, 13 Feb 2025 21:18:29 +0530 Subject: [PATCH 004/550] Making force merge threadpool 1/8th of total cores (#17255) Signed-off-by: Gaurav Bafna --- CHANGELOG.md | 3 ++- .../main/java/org/opensearch/threadpool/ThreadPool.java | 9 ++++++++- .../java/org/opensearch/threadpool/ThreadPoolTests.java | 8 ++++++++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 739b3b09c1926..d5092745fb825 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,7 +22,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) ### Changed -- Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233)) +- Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) +- Increase force merge threads to 1/8th of cores [#17255](https://github.com/opensearch-project/OpenSearch/pull/17255) ### Deprecated diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 59d3b110aeca8..b67b00bb42054 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -278,7 +278,10 @@ public ThreadPool( Names.FETCH_SHARD_STARTED, new ScalingExecutorBuilder(Names.FETCH_SHARD_STARTED, 1, 2 * allocatedProcessors, TimeValue.timeValueMinutes(5)) ); - builders.put(Names.FORCE_MERGE, new FixedExecutorBuilder(settings, Names.FORCE_MERGE, 1, -1)); + builders.put( + Names.FORCE_MERGE, + new FixedExecutorBuilder(settings, Names.FORCE_MERGE, oneEighthAllocatedProcessors(allocatedProcessors), -1) + ); builders.put( Names.FETCH_SHARD_STORE, new ScalingExecutorBuilder(Names.FETCH_SHARD_STORE, 1, 2 * allocatedProcessors, TimeValue.timeValueMinutes(5)) @@ -678,6 +681,10 @@ static int boundedBy(int value, int min, int max) { return Math.min(max, Math.max(min, value)); } + static int oneEighthAllocatedProcessors(final int allocatedProcessors) { + return boundedBy(allocatedProcessors / 8, 1, Integer.MAX_VALUE); + } + static int halfAllocatedProcessors(int allocatedProcessors) { return (allocatedProcessors + 1) / 2; } diff --git a/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java index 205bf7621c576..fd79115ad5872 100644 --- a/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java @@ -196,4 +196,12 @@ public void testThreadPoolResizeFail() { terminate(threadPool); } } + + public void testOneEighthAllocatedProcessors() { + assertThat(ThreadPool.oneEighthAllocatedProcessors(1), equalTo(1)); + assertThat(ThreadPool.oneEighthAllocatedProcessors(4), equalTo(1)); + assertThat(ThreadPool.oneEighthAllocatedProcessors(8), equalTo(1)); + assertThat(ThreadPool.oneEighthAllocatedProcessors(32), equalTo(4)); + assertThat(ThreadPool.oneEighthAllocatedProcessors(128), equalTo(16)); + } } From 9de21d1bdf5b7926fcbe6d788c5ec1f4b4fe3ba3 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Fri, 14 Feb 2025 08:58:59 +0530 Subject: [PATCH 
005/550] Using RemoteDirectory#delete to clear all segments during migration (#17021) Signed-off-by: Gaurav Bafna --- .../RemotePrimaryRelocationIT.java | 86 +++++++++++++++++++ .../common/blobstore/fs/FsBlobContainer.java | 1 + .../opensearch/index/shard/IndexShard.java | 2 +- .../store/RemoteSegmentStoreDirectory.java | 2 +- 4 files changed, 89 insertions(+), 2 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java index 7cacfdc972736..c32d3520e83cf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java @@ -18,14 +18,17 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; import org.opensearch.transport.client.Client; import org.opensearch.transport.client.Requests; +import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; @@ -195,4 +198,87 @@ public void testMixedModeRelocation_RemoteSeedingFail() throws Exception { .setTransientSettings(Settings.builder().put(RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.getKey(), (String) null)) .get(); } + + public void testMixedModeRelocation_FailInFinalize() throws Exception { + String docRepNode = internalCluster().startNode(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // create shard with 0 replica and 1 shard + client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); + ensureGreen("test"); + + AsyncIndexingService asyncIndexingService = new AsyncIndexingService("test"); + asyncIndexingService.startIndexing(); + + refresh("test"); + + // add remote node in mixed mode cluster + setAddRemote(true); + String remoteNode = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + + AtomicBoolean failFinalize = new AtomicBoolean(true); + + MockTransportService remoteNodeTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + remoteNode + ); + + remoteNodeTransportService.addRequestHandlingBehavior( + PeerRecoveryTargetService.Actions.FINALIZE, + (handler, request, channel, task) -> { + if (failFinalize.get()) { + throw new IOException("Failing finalize"); + } else { + handler.messageReceived(request, channel, task); + } + } + ); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.getKey(), "40s")) + .get(); + + // Change direction to remote store + 
updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + logger.info("--> relocating from {} to {} ", docRepNode, remoteNode); + client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, docRepNode, remoteNode)).execute().actionGet(); + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(5)) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .execute() + .actionGet(); + + assertTrue(clusterHealthResponse.getRelocatingShards() == 1); + + ClusterHealthRequest healthRequest = Requests.clusterHealthRequest() + .waitForNoRelocatingShards(true) + .waitForNoInitializingShards(true); + ClusterHealthResponse actionGet = client().admin().cluster().health(healthRequest).actionGet(); + assertEquals(actionGet.getRelocatingShards(), 0); + assertEquals(docRepNode, primaryNodeName("test")); + + // now unblock it + logger.info("Unblocking the finalize recovery now"); + failFinalize.set(false); + + client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, docRepNode, remoteNode)).execute().actionGet(); + waitForRelocation(); + + asyncIndexingService.stopIndexing(); + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.getKey(), (String) null)) + .get(); + } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java index b6644ffd16bab..931841ae3de6e 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java @@ -225,6 +225,7 @@ public void writeBlobAtomic(final String blobName, final InputStream inputStream } private void writeToPath(InputStream inputStream, Path tempBlobPath, long blobSize) throws IOException { + Files.createDirectories(path); try (OutputStream outputStream = Files.newOutputStream(tempBlobPath, StandardOpenOption.CREATE_NEW)) { final int bufferSize = blobStore.bufferSizeInBytes(); org.opensearch.common.util.io.Streams.copy( diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f26e53967b873..df841dac4cf8e 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -5056,7 +5056,7 @@ public void deleteTranslogFilesFromRemoteTranslog() throws IOException { */ public void deleteRemoteStoreContents() throws IOException { deleteTranslogFilesFromRemoteTranslog(); - getRemoteDirectory().deleteStaleSegments(0); + getRemoteDirectory().delete(); } public void syncTranslogFilesFromRemoteTranslog() throws IOException { diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 941cf047347f7..46a90da2a18b6 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -1061,7 +1061,7 @@ private boolean deleteIfEmpty() throws 
IOException { return delete(); } - private boolean delete() { + public boolean delete() { try { remoteDataDirectory.delete(); remoteMetadataDirectory.delete(); From 2d2b41d34544e467b285b4764b6c5e9e8cedeab0 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Fri, 14 Feb 2025 09:02:37 -0800 Subject: [PATCH 006/550] Remove deprecated settings to defer cluster recovery (#17357) The following settings were deprecated in ES 7.7 prior to the fork: - gateway.expected_nodes - gateway.expected_master_nodes - gateway.recover_after_nodes - gateway.recover_after_master_nodes This commit removes the deprecated settings and replaces their usages in tests with `recover_after_data_nodes`. Signed-off-by: Andrew Ross --- CHANGELOG-3.0.md | 1 + distribution/src/config/opensearch.yml | 2 +- .../admin/cluster/stats/ClusterStatsIT.java | 12 +-- .../cluster/tasks/PendingTasksBlocksIT.java | 2 +- .../admin/indices/exists/IndicesExistsIT.java | 2 +- .../gateway/RecoverAfterNodesIT.java | 97 +------------------ .../gateway/RecoveryFromGatewayIT.java | 12 +-- .../common/settings/ClusterSettings.java | 4 - .../opensearch/gateway/GatewayService.java | 75 +------------- .../gateway/GatewayServiceTests.java | 17 ---- 10 files changed, 22 insertions(+), 202 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index abb4dd8d4baa2..c5f9611910fa9 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -58,6 +58,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove `index.store.hybrid.mmap.extensions` setting in favor of `index.store.hybrid.nio.extensions` setting ([#9392](https://github.com/opensearch-project/OpenSearch/pull/9392)) - Remove package org.opensearch.action.support.master ([#4856](https://github.com/opensearch-project/OpenSearch/issues/4856)) - Remove transport-nio plugin ([#16887](https://github.com/opensearch-project/OpenSearch/issues/16887)) +- Remove deprecated 'gateway' settings used to defer cluster recovery ([#3117](https://github.com/opensearch-project/OpenSearch/issues/3117)) ### Fixed - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index 4115601f62ada..1ac7c5299964e 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -77,7 +77,7 @@ ${path.logs} # # Block initial recovery after a full cluster restart until N nodes are started: # -#gateway.recover_after_nodes: 3 +#gateway.recover_after_data_nodes: 3 # # For more information, consult the gateway module documentation. 
# diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index c81d203ec3db6..9058dc2f5b147 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -46,6 +46,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.gateway.GatewayService; import org.opensearch.monitor.os.OsStats; import org.opensearch.node.NodeRoleSettings; import org.opensearch.test.OpenSearchIntegTestCase; @@ -383,7 +384,9 @@ public void testAllocatedProcessors() throws Exception { } public void testClusterStatusWhenStateNotRecovered() throws Exception { - internalCluster().startClusterManagerOnlyNode(Settings.builder().put("gateway.recover_after_nodes", 2).build()); + internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).build() + ); ClusterStatsResponse response = client().admin() .cluster() .prepareClusterStats() @@ -391,11 +394,8 @@ public void testClusterStatusWhenStateNotRecovered() throws Exception { .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.RED)); - if (randomBoolean()) { - internalCluster().startClusterManagerOnlyNode(); - } else { - internalCluster().startDataOnlyNode(); - } + internalCluster().startDataOnlyNodes(2); + // wait for the cluster status to settle ensureGreen(); response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(randomBoolean()).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java index 83aa744a80599..2be4acd16671f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java @@ -95,7 +95,7 @@ public void testPendingTasksWithClusterNotRecoveredBlock() throws Exception { internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { @Override public Settings onNodeStopped(String nodeName) { - return Settings.builder().put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), nodeCount + 1).build(); + return Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), nodeCount + 1).build(); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java index b5ab4b5290171..8b063abc5ef5a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java @@ -49,7 +49,7 @@ public class IndicesExistsIT extends OpenSearchIntegTestCase { public void testIndexExistsWithBlocksInPlace() throws IOException { internalCluster().setBootstrapClusterManagerNodeIndex(0); - Settings settings = 
Settings.builder().put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), 99).build(); + Settings settings = Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 99).build(); String node = internalCluster().startNode(settings); assertRequestBuilderThrows( diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java index 480158273bdbb..44fd0f93cb080 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java @@ -77,7 +77,7 @@ public Client startNode(Settings.Builder settings) { public void testRecoverAfterNodes() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); logger.info("--> start node (1)"); - Client clientNode1 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3)); + Client clientNode1 = startNode(Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 3)); assertThat( clientNode1.admin() .cluster() @@ -92,7 +92,7 @@ public void testRecoverAfterNodes() throws Exception { ); logger.info("--> start node (2)"); - Client clientNode2 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3)); + Client clientNode2 = startNode(Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 3)); Thread.sleep(BLOCK_WAIT_TIMEOUT.millis()); assertThat( clientNode1.admin() @@ -120,104 +120,13 @@ public void testRecoverAfterNodes() throws Exception { ); logger.info("--> start node (3)"); - Client clientNode3 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3)); + Client clientNode3 = startNode(Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 3)); assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode1).isEmpty(), equalTo(true)); assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode2).isEmpty(), equalTo(true)); assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode3).isEmpty(), equalTo(true)); } - public void testRecoverAfterClusterManagerNodes() throws Exception { - internalCluster().setBootstrapClusterManagerNodeIndex(0); - logger.info("--> start cluster_manager_node (1)"); - Client clusterManager1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(clusterManagerOnlyNode())); - assertThat( - clusterManager1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), - hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) - ); - - logger.info("--> start data_node (1)"); - Client data1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(dataOnlyNode())); - assertThat( - clusterManager1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), - hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) - ); - assertThat( - data1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), - hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) - ); - - logger.info("--> start data_node (2)"); - Client data2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(dataOnlyNode())); - assertThat( - 
clusterManager1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), - hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) - ); - assertThat( - data1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), - hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) - ); - assertThat( - data2.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), - hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) - ); - - logger.info("--> start cluster_manager_node (2)"); - Client clusterManager2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(clusterManagerOnlyNode())); - assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clusterManager1).isEmpty(), equalTo(true)); - assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clusterManager2).isEmpty(), equalTo(true)); - assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true)); - assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data2).isEmpty(), equalTo(true)); - } - public void testRecoverAfterDataNodes() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); logger.info("--> start cluster_manager_node (1)"); diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 02e573059b499..f7156840a140e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -119,7 +119,7 @@ import static org.opensearch.gateway.GatewayRecoveryTestUtils.corruptShard; import static org.opensearch.gateway.GatewayRecoveryTestUtils.getDiscoveryNodes; import static org.opensearch.gateway.GatewayRecoveryTestUtils.prepareRequestMap; -import static org.opensearch.gateway.GatewayService.RECOVER_AFTER_NODES_SETTING; +import static org.opensearch.gateway.GatewayService.RECOVER_AFTER_DATA_NODES_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -411,7 +411,7 @@ public void testTwoNodeFirstNodeCleared() throws Exception { @Override public Settings onNodeStopped(String nodeName) { return Settings.builder() - .put(RECOVER_AFTER_NODES_SETTING.getKey(), 2) + .put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2) .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey()) // disable bootstrapping .build(); } @@ -436,7 +436,7 @@ public boolean clearData(String nodeName) { public void testLatestVersionLoaded() throws Exception { // clean two nodes - List nodes = internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()); + List nodes = internalCluster().startNodes(2, Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).build()); Settings node1DataPathSettings = internalCluster().dataPathSettings(nodes.get(0)); Settings node2DataPathSettings = internalCluster().dataPathSettings(nodes.get(1)); @@ -520,8 +520,8 @@ public void testLatestVersionLoaded() throws Exception { logger.info("--> starting the two nodes back"); internalCluster().startNodes( - 
Settings.builder().put(node1DataPathSettings).put("gateway.recover_after_nodes", 2).build(), - Settings.builder().put(node2DataPathSettings).put("gateway.recover_after_nodes", 2).build() + Settings.builder().put(node1DataPathSettings).put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).build(), + Settings.builder().put(node2DataPathSettings).put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).build() ); logger.info("--> running cluster_health (wait for the shards to startup)"); @@ -710,7 +710,7 @@ public void testStartedShardFoundIfStateNotYetProcessed() throws Exception { @Override public Settings onNodeStopped(String nodeName) throws Exception { // make sure state is not recovered - return Settings.builder().put(RECOVER_AFTER_NODES_SETTING.getKey(), 2).build(); + return Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).build(); } }); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 024db380650ff..d204c383524c2 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -346,11 +346,7 @@ public void apply(Settings value, Settings current, Settings previous) { NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING, // deprecated NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING, GatewayService.EXPECTED_DATA_NODES_SETTING, - GatewayService.EXPECTED_MASTER_NODES_SETTING, - GatewayService.EXPECTED_NODES_SETTING, GatewayService.RECOVER_AFTER_DATA_NODES_SETTING, - GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING, - GatewayService.RECOVER_AFTER_NODES_SETTING, GatewayService.RECOVER_AFTER_TIME_SETTING, ShardsBatchGatewayAllocator.GATEWAY_ALLOCATOR_BATCH_SIZE, ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/gateway/GatewayService.java b/server/src/main/java/org/opensearch/gateway/GatewayService.java index 7f95d7afe90c7..ed31b455f24f8 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayService.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayService.java @@ -68,51 +68,23 @@ public class GatewayService extends AbstractLifecycleComponent implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(GatewayService.class); - public static final Setting EXPECTED_NODES_SETTING = Setting.intSetting( - "gateway.expected_nodes", - -1, - -1, - Property.NodeScope, - Property.Deprecated - ); public static final Setting EXPECTED_DATA_NODES_SETTING = Setting.intSetting( "gateway.expected_data_nodes", -1, -1, Property.NodeScope ); - public static final Setting EXPECTED_MASTER_NODES_SETTING = Setting.intSetting( - "gateway.expected_master_nodes", - -1, - -1, - Property.NodeScope, - Property.Deprecated - ); public static final Setting RECOVER_AFTER_TIME_SETTING = Setting.positiveTimeSetting( "gateway.recover_after_time", TimeValue.timeValueMillis(0), Property.NodeScope ); - public static final Setting RECOVER_AFTER_NODES_SETTING = Setting.intSetting( - "gateway.recover_after_nodes", - -1, - -1, - Property.NodeScope, - Property.Deprecated - ); public static final Setting RECOVER_AFTER_DATA_NODES_SETTING = Setting.intSetting( "gateway.recover_after_data_nodes", -1, -1, Property.NodeScope ); - public static final Setting RECOVER_AFTER_MASTER_NODES_SETTING = Setting.intSetting( - "gateway.recover_after_master_nodes", - 0, - 0, - Property.NodeScope, 
- Property.Deprecated - ); public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock( 1, @@ -133,12 +105,8 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste private final ClusterService clusterService; private final TimeValue recoverAfterTime; - private final int recoverAfterNodes; - private final int expectedNodes; private final int recoverAfterDataNodes; private final int expectedDataNodes; - private final int recoverAfterClusterManagerNodes; - private final int expectedClusterManagerNodes; private final Runnable recoveryRunnable; @@ -158,25 +126,16 @@ public GatewayService( this.clusterService = clusterService; this.threadPool = threadPool; // allow to control a delay of when indices will get created - this.expectedNodes = EXPECTED_NODES_SETTING.get(settings); this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(settings); - this.expectedClusterManagerNodes = EXPECTED_MASTER_NODES_SETTING.get(settings); if (RECOVER_AFTER_TIME_SETTING.exists(settings)) { recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(settings); - } else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedClusterManagerNodes >= 0) { + } else if (expectedDataNodes >= 0) { recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET; } else { recoverAfterTime = null; } - this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(settings); this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(settings); - // default the recover after cluster-manager nodes to the minimum cluster-manager nodes in the discovery - if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(settings)) { - recoverAfterClusterManagerNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(settings); - } else { - recoverAfterClusterManagerNodes = -1; - } if (discovery instanceof Coordinator) { recoveryRunnable = () -> clusterService.submitStateUpdateTask("local-gateway-elected-state", new RecoverStateUpdateTask()); @@ -222,28 +181,16 @@ public void clusterChanged(final ClusterChangedEvent event) { final DiscoveryNodes nodes = state.nodes(); if (state.nodes().getClusterManagerNodeId() == null) { logger.debug("not recovering from gateway, no cluster-manager elected yet"); - } else if (recoverAfterNodes != -1 && (nodes.getClusterManagerAndDataNodes().size()) < recoverAfterNodes) { - logger.debug( - "not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]", - nodes.getClusterManagerAndDataNodes().size(), - recoverAfterNodes - ); } else if (recoverAfterDataNodes != -1 && nodes.getDataNodes().size() < recoverAfterDataNodes) { logger.debug( "not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]", nodes.getDataNodes().size(), recoverAfterDataNodes ); - } else if (recoverAfterClusterManagerNodes != -1 && nodes.getClusterManagerNodes().size() < recoverAfterClusterManagerNodes) { - logger.debug( - "not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]", - nodes.getClusterManagerNodes().size(), - recoverAfterClusterManagerNodes - ); } else { boolean enforceRecoverAfterTime; String reason; - if (expectedNodes == -1 && expectedClusterManagerNodes == -1 && expectedDataNodes == -1) { + if (expectedDataNodes == -1) { // no expected is set, honor the setting if they are there enforceRecoverAfterTime = true; reason = "recover_after_time was set to [" + recoverAfterTime + "]"; @@ -251,25 +198,9 @@ public void clusterChanged(final ClusterChangedEvent event) { // one of the expected is set, see if all of them meet 
the need, and ignore the timeout in this case enforceRecoverAfterTime = false; reason = ""; - if (expectedNodes != -1 && (nodes.getClusterManagerAndDataNodes().size() < expectedNodes)) { // does not meet the - // expected... - enforceRecoverAfterTime = true; - reason = "expecting [" - + expectedNodes - + "] nodes, but only have [" - + nodes.getClusterManagerAndDataNodes().size() - + "]"; - } else if (expectedDataNodes != -1 && (nodes.getDataNodes().size() < expectedDataNodes)) { // does not meet the expected... + if (nodes.getDataNodes().size() < expectedDataNodes) { // does not meet the expected... enforceRecoverAfterTime = true; reason = "expecting [" + expectedDataNodes + "] data nodes, but only have [" + nodes.getDataNodes().size() + "]"; - } else if (expectedClusterManagerNodes != -1 && (nodes.getClusterManagerNodes().size() < expectedClusterManagerNodes)) { - // does not meet the expected... - enforceRecoverAfterTime = true; - reason = "expecting [" - + expectedClusterManagerNodes - + "] cluster-manager nodes, but only have [" - + nodes.getClusterManagerNodes().size() - + "]"; } } performStateRecovery(enforceRecoverAfterTime, reason); diff --git a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java index 59fb7df5428e2..b9493716de0d7 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java @@ -48,7 +48,6 @@ import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.transport.TransportAddress; @@ -110,22 +109,6 @@ public void testDefaultRecoverAfterTime() { assertThat(service.recoverAfterTime().millis(), Matchers.equalTo(timeValue.millis())); } - public void testDeprecatedSettings() { - GatewayService service = createService(Settings.builder()); - - service = createService(Settings.builder().put("gateway.expected_nodes", 1)); - assertSettingDeprecationsAndWarnings(new Setting[] { GatewayService.EXPECTED_NODES_SETTING }); - - service = createService(Settings.builder().put("gateway.expected_master_nodes", 1)); - assertSettingDeprecationsAndWarnings(new Setting[] { GatewayService.EXPECTED_MASTER_NODES_SETTING }); - - service = createService(Settings.builder().put("gateway.recover_after_nodes", 1)); - assertSettingDeprecationsAndWarnings(new Setting[] { GatewayService.RECOVER_AFTER_NODES_SETTING }); - - service = createService(Settings.builder().put("gateway.recover_after_master_nodes", 1)); - assertSettingDeprecationsAndWarnings(new Setting[] { GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING }); - } - public void testRecoverStateUpdateTask() throws Exception { GatewayService service = createService(Settings.builder()); ClusterStateUpdateTask clusterStateUpdateTask = service.new RecoverStateUpdateTask(); From 56825f63ec19cd1b96651d0c5a3663108ee26c8e Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Fri, 14 Feb 2025 18:17:23 -0800 Subject: [PATCH 007/550] [Bugfix] Fix IllegalArgumentException thrown when creating a PIT (#16781) * fix create_pit enum bug Signed-off-by: Peter Alfonsi * changelog Signed-off-by: Peter Alfonsi * Fix SearchResponse XContent Signed-off-by: Peter Alfonsi * Addressed David's 
comments Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * Addressed andrross's comment Signed-off-by: Peter Alfonsi * Revert "Addressed andrross's comment" This reverts commit 49fec5734f5f1bb366764b689780c38833b90a84. Signed-off-by: Peter Alfonsi * Revert "Addressed David's comments" This reverts commit a41f9be10026344d956f2b861d2439b9c03f2d5b. Signed-off-by: Peter Alfonsi * Revert "Fix SearchResponse XContent" This reverts commit ea30484370116fbba655c4b2a29e1ebe7b0ea3ef. Signed-off-by: Peter Alfonsi * Revert "fix create_pit enum bug" This reverts commit 0c9110d5f017b0d04033e6cf5f5715aca19f14ef. Signed-off-by: Peter Alfonsi * Fix create_pit bug with catch-all search phase name Signed-off-by: Peter Alfonsi * switch to new optional method Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * Update server/src/main/java/org/opensearch/action/search/SearchPhase.java Co-authored-by: Andrew Ross Signed-off-by: Peter Alfonsi --------- Signed-off-by: Peter Alfonsi Signed-off-by: Peter Alfonsi Co-authored-by: Peter Alfonsi Co-authored-by: Andrew Ross --- CHANGELOG.md | 1 + .../opensearch/action/search/SearchPhase.java | 18 +++++- .../action/search/SearchRequestStats.java | 14 +++-- .../AbstractSearchAsyncActionTests.java | 53 +++++++++-------- .../SearchRequestOperationsListenerTests.java | 11 ++-- .../search/SearchRequestStatsTests.java | 58 +++++++++++++++++-- .../index/search/stats/SearchStatsTests.java | 3 +- 7 files changed, 115 insertions(+), 43 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d5092745fb825..aa6e7bce8655d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix case insensitive and escaped query on wildcard ([#16827](https://github.com/opensearch-project/OpenSearch/pull/16827)) - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Add highlighting for wildcard search on `match_only_text` field ([#17101](https://github.com/opensearch-project/OpenSearch/pull/17101)) +- Fix illegal argument exception when creating a PIT ([#16781](https://github.com/opensearch-project/OpenSearch/pull/16781)) ### Security diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhase.java b/server/src/main/java/org/opensearch/action/search/SearchPhase.java index 0890e9f5de8d4..351c23fec3d80 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhase.java @@ -37,6 +37,7 @@ import java.io.IOException; import java.util.Locale; import java.util.Objects; +import java.util.Optional; /** * Base class for all individual search phases like collecting distributed frequencies, fetching documents, querying shards. @@ -69,11 +70,26 @@ public String getName() { } /** - * Returns the SearchPhase name as {@link SearchPhaseName}. Exception will come if SearchPhase name is not defined + * Returns the SearchPhase name as {@link SearchPhaseName}. Exception will come if SearchPhase name is not defined. + * @deprecated Use getSearchPhaseNameOptional() to avoid possible exceptions. * in {@link SearchPhaseName} * @return {@link SearchPhaseName} */ + @Deprecated public SearchPhaseName getSearchPhaseName() { return SearchPhaseName.valueOf(name.toUpperCase(Locale.ROOT)); } + + /** + * Returns an Optional of the SearchPhase name as {@link SearchPhaseName}. 
If there's not a matching SearchPhaseName, + * returns an empty Optional. + * @return {@link Optional} + */ + public Optional getSearchPhaseNameOptional() { + try { + return Optional.of(SearchPhaseName.valueOf(name.toUpperCase(Locale.ROOT))); + } catch (IllegalArgumentException e) { + return Optional.empty(); + } + } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index 94200d29a4f21..dd3b6838ab5da 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -73,20 +73,22 @@ public long getTookMetric() { @Override protected void onPhaseStart(SearchPhaseContext context) { - phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); + context.getCurrentPhase().getSearchPhaseNameOptional().ifPresent(name -> phaseStatsMap.get(name).current.inc()); } @Override protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { - StatsHolder phaseStats = phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()); - phaseStats.current.dec(); - phaseStats.total.inc(); - phaseStats.timing.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos())); + context.getCurrentPhase().getSearchPhaseNameOptional().ifPresent(name -> { + StatsHolder phaseStats = phaseStatsMap.get(name); + phaseStats.current.dec(); + phaseStats.total.inc(); + phaseStats.timing.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos())); + }); } @Override protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { - phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + context.getCurrentPhase().getSearchPhaseNameOptional().ifPresent(name -> phaseStatsMap.get(name).current.dec()); } @Override diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index 27336e86e52b0..b0fab3b7a3556 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -399,29 +399,29 @@ public void testOnPhaseFailureAndVerifyListeners() { final List requestOperationListeners = List.of(testListener, assertingListener); SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); action.start(); - assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseNameOptional().get())); action.onPhaseFailure(new SearchPhase("test") { @Override public void run() { } }, "message", null); - assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); - assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseNameOptional().get())); + assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseNameOptional().get())); SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( requestOperationListeners ); searchDfsQueryThenFetchAsyncAction.start(); - assertEquals(1, 
testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseNameOptional().get())); searchDfsQueryThenFetchAsyncAction.onPhaseFailure(new SearchPhase("test") { @Override public void run() { } }, "message", null); - assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); - assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseNameOptional().get())); + assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseNameOptional().get())); FetchSearchPhase fetchPhase = createFetchSearchPhase(); ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); @@ -430,15 +430,15 @@ public void run() { action.skipShard(searchShardIterator); action.start(); action.executeNextPhase(action, fetchPhase); - assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseNameOptional().get())); action.onPhaseFailure(new SearchPhase("test") { @Override public void run() { } }, "message", null); - assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); - assertEquals(0, testListener.getPhaseTotal(fetchPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseNameOptional().get())); + assertEquals(0, testListener.getPhaseTotal(fetchPhase.getSearchPhaseNameOptional().get())); } public void testOnPhaseFailure() { @@ -722,7 +722,7 @@ public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedEx action.start(); // Verify queryPhase current metric - assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseNameOptional().get())); TimeUnit.MILLISECONDS.sleep(delay); FetchSearchPhase fetchPhase = createFetchSearchPhase(); @@ -733,12 +733,12 @@ public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedEx action.executeNextPhase(action, fetchPhase); // Verify queryPhase total, current and latency metrics - assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); - assertThat(testListener.getPhaseMetric(action.getSearchPhaseName()), greaterThanOrEqualTo(delay)); - assertEquals(1, testListener.getPhaseTotal(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseNameOptional().get())); + assertThat(testListener.getPhaseMetric(action.getSearchPhaseNameOptional().get()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(action.getSearchPhaseNameOptional().get())); // Verify fetchPhase current metric - assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseNameOptional().get())); TimeUnit.MILLISECONDS.sleep(delay); ExpandSearchPhase expandPhase = createExpandSearchPhase(); @@ -746,18 +746,18 @@ public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedEx TimeUnit.MILLISECONDS.sleep(delay); // Verify fetchPhase total, current and latency metrics - assertThat(testListener.getPhaseMetric(fetchPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); - assertEquals(1, testListener.getPhaseTotal(fetchPhase.getSearchPhaseName())); - assertEquals(0, 
testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + assertThat(testListener.getPhaseMetric(fetchPhase.getSearchPhaseNameOptional().get()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(fetchPhase.getSearchPhaseNameOptional().get())); + assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseNameOptional().get())); - assertEquals(1, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); + assertEquals(1, testListener.getPhaseCurrent(expandPhase.getSearchPhaseNameOptional().get())); action.executeNextPhase(expandPhase, fetchPhase); action.onPhaseDone(); /* finish phase since we don't have reponse being sent */ - assertThat(testListener.getPhaseMetric(expandPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); - assertEquals(1, testListener.getPhaseTotal(expandPhase.getSearchPhaseName())); - assertEquals(0, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); + assertThat(testListener.getPhaseMetric(expandPhase.getSearchPhaseNameOptional().get()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(expandPhase.getSearchPhaseNameOptional().get())); + assertEquals(0, testListener.getPhaseCurrent(expandPhase.getSearchPhaseNameOptional().get())); } public void testOnPhaseListenersWithDfsType() throws InterruptedException { @@ -772,7 +772,7 @@ public void testOnPhaseListenersWithDfsType() throws InterruptedException { FetchSearchPhase fetchPhase = createFetchSearchPhase(); searchDfsQueryThenFetchAsyncAction.start(); - assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseNameOptional().get())); TimeUnit.MILLISECONDS.sleep(delay); ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); @@ -786,9 +786,12 @@ public void testOnPhaseListenersWithDfsType() throws InterruptedException { null ); /* finalizing the fetch phase since we do adhoc phase lifecycle calls */ - assertThat(testListener.getPhaseMetric(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName()), greaterThanOrEqualTo(delay)); - assertEquals(1, testListener.getPhaseTotal(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); - assertEquals(0, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + assertThat( + testListener.getPhaseMetric(searchDfsQueryThenFetchAsyncAction.getSearchPhaseNameOptional().get()), + greaterThanOrEqualTo(delay) + ); + assertEquals(1, testListener.getPhaseTotal(searchDfsQueryThenFetchAsyncAction.getSearchPhaseNameOptional().get())); + assertEquals(0, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseNameOptional().get())); } private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAction( diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java index 990ed95f1aebc..29561e938bf6c 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java @@ -14,6 +14,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; 
+import java.util.Optional; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -30,18 +31,18 @@ public void testListenersAreExecuted() { @Override public void onPhaseStart(SearchPhaseContext context) { - searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseNameOptional().get()).current.inc(); } @Override public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { - searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); - searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).total.inc(); + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseNameOptional().get()).current.dec(); + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseNameOptional().get()).total.inc(); } @Override public void onPhaseFailure(SearchPhaseContext context, Throwable cause) { - searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseNameOptional().get()).current.dec(); } }; @@ -61,7 +62,7 @@ public void onPhaseFailure(SearchPhaseContext context, Throwable cause) { for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { when(ctx.getCurrentPhase()).thenReturn(searchPhase); - when(searchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + when(searchPhase.getSearchPhaseNameOptional()).thenReturn(Optional.of(searchPhaseName)); compositeListener.onPhaseStart(ctx); assertEquals(totalListeners, searchPhaseMap.get(searchPhaseName).current.count()); } diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java index 3bad3ec3e7d21..7c2a3435afd6d 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java @@ -16,6 +16,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; @@ -68,7 +69,7 @@ public void testSearchRequestPhaseFailure() { when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { - when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + when(mockSearchPhase.getSearchPhaseNameOptional()).thenReturn(Optional.of(searchPhaseName)); testRequestStats.onPhaseStart(ctx); assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); testRequestStats.onPhaseFailure(ctx, new Throwable()); @@ -85,7 +86,7 @@ public void testSearchRequestStats() { when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { - when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + when(mockSearchPhase.getSearchPhaseNameOptional()).thenReturn(Optional.of(searchPhaseName)); long tookTimeInMillis = randomIntBetween(1, 10); testRequestStats.onPhaseStart(ctx); long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); @@ -116,7 +117,7 @@ public void testSearchRequestStatsOnPhaseStartConcurrently() throws InterruptedE SearchPhaseContext ctx = mock(SearchPhaseContext.class); SearchPhase mockSearchPhase = mock(SearchPhase.class); 
when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); - when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + when(mockSearchPhase.getSearchPhaseNameOptional()).thenReturn(Optional.of(searchPhaseName)); for (int i = 0; i < numTasks; i++) { threads[i] = new Thread(() -> { phaser.arriveAndAwaitAdvance(); @@ -145,7 +146,7 @@ public void testSearchRequestStatsOnPhaseEndConcurrently() throws InterruptedExc SearchPhaseContext ctx = mock(SearchPhaseContext.class); SearchPhase mockSearchPhase = mock(SearchPhase.class); when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); - when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + when(mockSearchPhase.getSearchPhaseNameOptional()).thenReturn(Optional.of(searchPhaseName)); long tookTimeInMillis = randomIntBetween(1, 10); long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); @@ -188,7 +189,7 @@ public void testSearchRequestStatsOnPhaseFailureConcurrently() throws Interrupte SearchPhaseContext ctx = mock(SearchPhaseContext.class); SearchPhase mockSearchPhase = mock(SearchPhase.class); when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); - when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + when(mockSearchPhase.getSearchPhaseNameOptional()).thenReturn(Optional.of(searchPhaseName)); for (int i = 0; i < numTasks; i++) { threads[i] = new Thread(() -> { phaser.arriveAndAwaitAdvance(); @@ -205,4 +206,51 @@ public void testSearchRequestStatsOnPhaseFailureConcurrently() throws Interrupte assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); } } + + public void testUnrecognizedPhaseNamesAreIgnored() { + // Unrecognized phase names producing an empty optional should not throw any error and no stats should be incremented. + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + when(mockSearchPhase.getSearchPhaseNameOptional()).thenReturn(Optional.empty()); + testRequestStats.onPhaseStart(ctx); + int minTimeNanos = 10; + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(minTimeNanos); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + + for (SearchPhaseName name : SearchPhaseName.values()) { + assertEquals(0, testRequestStats.getPhaseCurrent(name)); + } + + testRequestStats.onPhaseEnd( + ctx, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest(), + () -> null + ) + ); + + for (SearchPhaseName name : SearchPhaseName.values()) { + assertEquals(0, testRequestStats.getPhaseCurrent(name)); + assertEquals(0, testRequestStats.getPhaseTotal(name)); + assertEquals(0, testRequestStats.getPhaseMetric(name)); + } + } + + public void testUnrecognizedSearchPhaseReturnsEmptyOptional() { + // Test search phases with unrecognized names return Optional.empty() when getSearchPhaseNameOptional() is called. + // These may exist, for example, "create_pit". 
+ String unrecognizedName = "unrecognized_name"; + SearchPhase dummyPhase = new SearchPhase(unrecognizedName) { + @Override + public void run() {} + }; + + assertEquals(unrecognizedName, dummyPhase.getName()); + assertEquals(Optional.empty(), dummyPhase.getSearchPhaseNameOptional()); + } } diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index 594700ea60b3e..65e8997d75403 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -44,6 +44,7 @@ import java.util.HashMap; import java.util.Map; +import java.util.Optional; import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -86,7 +87,7 @@ public void testShardLevelSearchGroupStats() throws Exception { SearchPhase mockSearchPhase = mock(SearchPhase.class); when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); when(mockSearchPhase.getStartTimeInNanos()).thenReturn(System.nanoTime() - TimeUnit.SECONDS.toNanos(paramValue)); - when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + when(mockSearchPhase.getSearchPhaseNameOptional()).thenReturn(Optional.of(searchPhaseName)); for (int iterator = 0; iterator < paramValue; iterator++) { onPhaseStart(testRequestStats, ctx); onPhaseEnd(testRequestStats, ctx); From 91a93dacb84eae4f09decbabe54771585d42b570 Mon Sep 17 00:00:00 2001 From: Rampreeth Ethiraj Date: Tue, 18 Feb 2025 00:07:48 +0530 Subject: [PATCH 008/550] Fix Segment Replication stats throwing NPE (#14580) Signed-off-by: Rampreeth Ethiraj --- .../java/org/opensearch/index/seqno/ReplicationTracker.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index c0bb52b6b43bc..76ef45158e3d5 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1254,8 +1254,9 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { // skip any shard that is a relocating primary or search only replica (not tracked by primary) private boolean shouldSkipReplicationTimer(String allocationId) { - Optional shardRouting = routingTable.shards() + Optional shardRouting = routingTable.assignedShards() .stream() + .filter(routing -> Objects.nonNull(routing.allocationId())) .filter(routing -> routing.allocationId().getId().equals(allocationId)) .findAny(); return shardRouting.isPresent() && (shardRouting.get().primary() || shardRouting.get().isSearchOnly()); From e62bf1a6b5e87cf6d138ddeecfca255fe0c4aa07 Mon Sep 17 00:00:00 2001 From: "Samuel.G" <1148690954@qq.com> Date: Wed, 19 Feb 2025 02:23:05 +0900 Subject: [PATCH 009/550] Wildcard field use only 3-gram to index (#17349) * support 3gram wildcard Signed-off-by: gesong.samuel * add changelog-3 Signed-off-by: gesong.samuel * add rolling upgrade test for wildcard field Signed-off-by: gesong.samuel * remove test case added in #16827 Signed-off-by: gesong.samuel --------- Signed-off-by: gesong.samuel Co-authored-by: gesong.samuel --- CHANGELOG-3.0.md | 1 + .../test/mixed_cluster/40_wildcard.yml | 200 +++++++++++++++ .../test/old_cluster/40_wildcard.yml | 235 ++++++++++++++++++ .../test/upgraded_cluster/40_wildcard.yml | 200 +++++++++++++++ 
.../index/mapper/WildcardFieldMapper.java | 134 ++++------ .../mapper/WildcardFieldMapperTests.java | 37 +-- .../index/mapper/WildcardFieldTypeTests.java | 38 +-- 7 files changed, 715 insertions(+), 130 deletions(-) create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_wildcard.yml create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_wildcard.yml create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_wildcard.yml diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index c5f9611910fa9..bc5e63dbdf8ce 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -37,6 +37,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Stop minimizing automata used for case-insensitive matches ([#17268](https://github.com/opensearch-project/OpenSearch/pull/17268)) - Refactor the `:server` module `org.opensearch.client` to `org.opensearch.transport.client` to eliminate top level split packages for JPMS support ([#17272](https://github.com/opensearch-project/OpenSearch/pull/17272)) - Use Lucene `BM25Similarity` as default since the `LegacyBM25Similarity` is marked as deprecated ([#17306](https://github.com/opensearch-project/OpenSearch/pull/17306)) +- Wildcard field index only 3gram of the input data [#17349](https://github.com/opensearch-project/OpenSearch/pull/17349) ### Deprecated diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_wildcard.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_wildcard.yml new file mode 100644 index 0000000000000..e06854af7e924 --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_wildcard.yml @@ -0,0 +1,200 @@ +# refactored from rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml +--- +"search on mixed state": + # "term query matches exact value" + - do: + search: + index: test + body: + query: + term: + my_field: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + term: + my_field.doc_values: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # term query matches lowercase-normalized value + - do: + search: + index: test + body: + query: + term: + my_field.lower: "abcd" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field.lower: "ABCD" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field: "abcd" + - match: { hits.total.value: 0 } + + # wildcard query matches + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*Node*Exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + # wildcard query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*node*exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*NODE*EXCEPTION*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*node*exception*" + 
- match: { hits.total.value: 0 } + + # prefix query matches + - do: + search: + index: test + body: + query: + prefix: + my_field: + value: "[2024-06-08T" + - match: { hits.total.value: 3 } + + # regexp query matches + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*cluster-manager node.*" + - match: { hits.total.value: 2 } + + # regexp query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + regexp: + my_field.lower: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 2 } + + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 0 } + + # wildcard match-all works + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*" + - match: { hits.total.value: 6 } + + # regexp match-all works + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*" + - match: { hits.total.value: 6 } + + # terms query on wildcard field matches + - do: + search: + index: test + body: + query: + terms: { my_field: [ "AbCd" ] } + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # case insensitive query on wildcard field + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + case_insensitive: true + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_wildcard.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_wildcard.yml new file mode 100644 index 0000000000000..b19882c69ddd7 --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_wildcard.yml @@ -0,0 +1,235 @@ +# refactored from rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml +--- +"Create index with Wildcard field": + - do: + indices.create: + index: test + body: + mappings: + properties: + my_field: + type: wildcard + fields: + lower: + type: wildcard + normalizer: lowercase + doc_values: + type: wildcard + doc_values: true + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test", "_id":1}}' + - '{"my_field": "org.opensearch.transport.NodeDisconnectedException: [node_s0][127.0.0.1:39953][disconnected] disconnected"}' + - '{"index": {"_index": "test", "_id":2}}' + - '{"my_field": "[2024-06-08T06:31:37,443][INFO ][o.o.c.c.Coordinator ] [node_s2] cluster-manager node [{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true}] failed, restarting discovery"}' + - '{"index": {"_index": "test", "_id":3}}' + - '{"my_field": "[2024-06-08T06:31:37,451][INFO ][o.o.c.s.ClusterApplierService] [node_s2] cluster-manager node changed {previous [{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true}], current []}, term: 1, version: 24, reason: becoming candidate: onLeaderFailure"}' + - '{"index": {"_index": "test", "_id":4}}' + - '{"my_field": "[2024-06-08T06:31:37,452][WARN ][o.o.c.NodeConnectionsService] [node_s1] failed to connect to 
{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true} (tried [1] times)"}' + - '{"index": {"_index": "test", "_id":5}}' + - '{"my_field": "AbCd"}' + - '{"index": {"_index": "test", "_id":6}}' + - '{"other_field": "test"}' + - '{"index": {"_index": "test", "_id":7}}' + - '{"my_field": "ABCD"}' + + # "term query matches exact value" + - do: + search: + index: test + body: + query: + term: + my_field: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + term: + my_field.doc_values: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # term query matches lowercase-normalized value + - do: + search: + index: test + body: + query: + term: + my_field.lower: "abcd" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field.lower: "ABCD" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field: "abcd" + - match: { hits.total.value: 0 } + + # wildcard query matches + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*Node*Exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + # wildcard query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*node*exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*NODE*EXCEPTION*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*node*exception*" + - match: { hits.total.value: 0 } + + # prefix query matches + - do: + search: + index: test + body: + query: + prefix: + my_field: + value: "[2024-06-08T" + - match: { hits.total.value: 3 } + + # regexp query matches + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*cluster-manager node.*" + - match: { hits.total.value: 2 } + + # regexp query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + regexp: + my_field.lower: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 2 } + + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 0 } + + # wildcard match-all works + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*" + - match: { hits.total.value: 6 } + + # regexp match-all works + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*" + - match: { hits.total.value: 6 } + + # terms query on wildcard field matches + - do: + search: + index: test + body: + query: + terms: { my_field: [ "AbCd" ] } + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # case insensitive query on wildcard field + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + case_insensitive: true + - match: { 
hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_wildcard.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_wildcard.yml new file mode 100644 index 0000000000000..29518931a5b8b --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_wildcard.yml @@ -0,0 +1,200 @@ +# refactored from rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml +--- +"search after upgrade": + # "term query matches exact value" + - do: + search: + index: test + body: + query: + term: + my_field: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + term: + my_field.doc_values: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # term query matches lowercase-normalized value + - do: + search: + index: test + body: + query: + term: + my_field.lower: "abcd" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field.lower: "ABCD" + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } + + - do: + search: + index: test + body: + query: + term: + my_field: "abcd" + - match: { hits.total.value: 0 } + + # wildcard query matches + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*Node*Exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + # wildcard query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*node*exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*NODE*EXCEPTION*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*node*exception*" + - match: { hits.total.value: 0 } + + # prefix query matches + - do: + search: + index: test + body: + query: + prefix: + my_field: + value: "[2024-06-08T" + - match: { hits.total.value: 3 } + + # regexp query matches + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*cluster-manager node.*" + - match: { hits.total.value: 2 } + + # regexp query matches lowercase-normalized field + - do: + search: + index: test + body: + query: + regexp: + my_field.lower: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 2 } + + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 0 } + + # wildcard match-all works + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*" + - match: { hits.total.value: 6 } + + # regexp match-all works + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*" + - match: { hits.total.value: 6 } + + # terms query on wildcard field matches + - do: + search: + index: test + body: + query: + terms: { my_field: [ "AbCd" ] } + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + # case insensitive query on wildcard field + - do: + search: + index: test + body: + query: + 
wildcard: + my_field: + value: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "AbCd" + case_insensitive: true + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } diff --git a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java index 07dbe695bbbbb..20c5ce87ad1c7 100644 --- a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java @@ -159,6 +159,7 @@ public WildcardFieldMapper build(BuilderContext context) { } + public static final int NGRAM_SIZE = 3; public static final String CONTENT_TYPE = "wildcard"; public static final TypeParser PARSER = new TypeParser((n, c) -> new WildcardFieldMapper.Builder(n, c.getIndexAnalyzers())); @@ -230,97 +231,49 @@ protected void parseCreateField(ParseContext context) throws IOException { /** * Tokenizer to emit tokens to support wildcard first-phase matching. *

- * Will emit all substrings of length 1,2, and 3, with 0-valued anchors for the prefix/suffix. + * Will emit all substrings of only 3, with 0-valued anchors for the prefix/suffix. *

* For example, given the string "lucene", output the following terms: *

- * [0, 'l'] + * [0, 0, 'l'] * [0, 'l', 'u'] - * ['l'] - * ['l', 'u'] * ['l', 'u', 'c'] - * ['u'] - * ['u','c'] * ['u','c','e'] - * ['c'] - * ['c', 'e'] * ['c', 'e', 'n'] - * ['e'] - * ['e', 'n'] * ['e', 'n', 'e'] - * ['n'] - * ['n', 'e'] * ['n', 'e', 0] - * ['e'] - * ['e', 0] + * ['e', 0, 0] *

* Visible for testing. */ static final class WildcardFieldTokenizer extends Tokenizer { private final CharTermAttribute charTermAttribute = addAttribute(CharTermAttribute.class); - private final char[] buffer = new char[3]; // Ring buffer for up to 3 chars - private int offset = 0; // Position in the buffer - private int length = 2; // First token is anchor + first char + private final char[] buffer = new char[NGRAM_SIZE]; // Ring buffer for up to 3 chars + private int offset = NGRAM_SIZE - 1; // next position in buffer to store next input char @Override public void reset() throws IOException { super.reset(); - buffer[0] = 0; - int firstChar = input.read(); - if (firstChar != -1) { - buffer[1] = (char) firstChar; - int secondChar = input.read(); - if (secondChar != -1) { - buffer[2] = (char) secondChar; - } else { - buffer[2] = 0; - } - } else { - buffer[1] = 0; + for (int i = 0; i < NGRAM_SIZE - 1; i++) { + buffer[i] = 0; } - } @Override public boolean incrementToken() throws IOException { - charTermAttribute.setLength(length); - int numZeroes = 0; - for (int i = 0; i < length; i++) { - char curChar = buffer[(i + offset) % 3]; - if (curChar == 0) { - numZeroes++; - } - charTermAttribute.buffer()[i] = buffer[(i + offset) % 3]; - } - if (numZeroes == 2) { - // Two zeroes usually means we're done. - if (length == 3 && charTermAttribute.buffer()[1] != 0) { - // The only case where we're not done is if the input has exactly 1 character, so the buffer - // contains 0, char, 0. In that case, we return char now, then return char, 0 on the next iteration - charTermAttribute.buffer()[0] = charTermAttribute.buffer()[1]; - charTermAttribute.buffer()[1] = 0; - charTermAttribute.setLength(1); - length = 2; - offset = 1; - return true; - } - return false; - } - if (length == 3) { - // Read the next character, overwriting the current offset - int nextChar = input.read(); - if (nextChar != -1) { - buffer[offset] = (char) nextChar; - } else { - // End of input. Pad with extra 0 to trigger the logic above. - buffer[offset] = 0; - } - offset = (offset + 1) % 3; - length = 1; - } else { - length = length + 1; + charTermAttribute.setLength(NGRAM_SIZE); + int c = input.read(); + c = c == -1 ? 0 : c; + + buffer[offset++ % NGRAM_SIZE] = (char) c; + boolean has_next = false; + for (int i = 0; i < NGRAM_SIZE; i++) { + char curChar = buffer[(offset + i) % NGRAM_SIZE]; + charTermAttribute.buffer()[i] = curChar; + has_next |= curChar != 0; } - return true; + + return has_next; } } @@ -479,8 +432,8 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo Query approximation; if (requiredNGrams.isEmpty()) { // This only happens when all characters are wildcard characters (* or ?), - // or it's the empty string. - if (value.length() == 0 || value.contains("?")) { + // or it's only contains sequential characters less than NGRAM_SIZE (which defaults to 3). 
+ if (findNonWildcardSequence(value, 0) != value.length() || value.length() == 0 || value.contains("?")) { approximation = this.existsQuery(context); } else { return existsQuery(context); @@ -502,15 +455,20 @@ static Set getRequiredNGrams(String value, boolean regexpMode) { int pos = 0; String rawSequence = null; String currentSequence = null; + char[] buffer = new char[NGRAM_SIZE]; if (!value.startsWith("?") && !value.startsWith("*")) { // Can add prefix term rawSequence = getNonWildcardSequence(value, 0); currentSequence = performEscape(rawSequence, regexpMode); - if (currentSequence.length() == 1) { - terms.add(new String(new char[] { 0, currentSequence.charAt(0) })); - } else { - terms.add(new String(new char[] { 0, currentSequence.charAt(0), currentSequence.charAt(1) })); + + // buffer[0] is automatically set to 0 + Arrays.fill(buffer, (char) 0); + int startIdx = Math.max(NGRAM_SIZE - currentSequence.length(), 1); + for (int j = 0; j < currentSequence.length() && j < NGRAM_SIZE - 1; j++) { + buffer[startIdx + j] = currentSequence.charAt(j); } + + terms.add(new String(buffer)); } else { pos = findNonWildcardSequence(value, pos); rawSequence = getNonWildcardSequence(value, pos); @@ -518,23 +476,27 @@ static Set getRequiredNGrams(String value, boolean regexpMode) { while (pos < value.length()) { boolean isEndOfValue = pos + rawSequence.length() == value.length(); currentSequence = performEscape(rawSequence, regexpMode); - if (!currentSequence.isEmpty() && currentSequence.length() < 3 && !isEndOfValue && pos > 0) { - // If this is a prefix or suffix of length < 3, then we already have a longer token including the anchor. - terms.add(currentSequence); - } else { - for (int i = 0; i < currentSequence.length() - 2; i++) { - terms.add(currentSequence.substring(i, i + 3)); - } + + for (int i = 0; i < currentSequence.length() - NGRAM_SIZE + 1; i++) { + terms.add(currentSequence.substring(i, i + 3)); } if (isEndOfValue) { // This is the end of the input. We can attach a suffix anchor. - if (currentSequence.length() == 1) { - terms.add(new String(new char[] { currentSequence.charAt(0), 0 })); - } else { - char a = currentSequence.charAt(currentSequence.length() - 2); - char b = currentSequence.charAt(currentSequence.length() - 1); - terms.add(new String(new char[] { a, b, 0 })); + // special case when we should generate '0xxxxxxx0', where we have (NGRAM_SIZE - 2) * x + Arrays.fill(buffer, (char) 0); + if (pos == 0 && currentSequence.length() == NGRAM_SIZE - 2) { + for (int i = 0; i < currentSequence.length(); i++) { + buffer[i + 1] = currentSequence.charAt(i); + } + terms.add(new String(buffer)); + Arrays.fill(buffer, (char) 0); + } + int rightStartIdx = NGRAM_SIZE - currentSequence.length() - 2; + rightStartIdx = rightStartIdx < 0 ? 
NGRAM_SIZE - 2 : rightStartIdx; + for (int j = 0; j < currentSequence.length() && j < NGRAM_SIZE - 1; j++) { + buffer[rightStartIdx - j] = currentSequence.charAt(currentSequence.length() - j - 1); } + terms.add(new String(buffer)); } pos = findNonWildcardSequence(value, pos + rawSequence.length()); rawSequence = getNonWildcardSequence(value, pos); diff --git a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java index b19e3687cf944..25aacb41f029d 100644 --- a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java @@ -82,22 +82,11 @@ public void testTokenizer() throws IOException { List.of( WildcardFieldTypeTests.prefixAnchored("p"), WildcardFieldTypeTests.prefixAnchored("pi"), - "p", - "pi", "pic", - "i", - "ic", "ick", - "c", - "ck", "ckl", - "k", - "kl", "kle", - "l", - "le", WildcardFieldTypeTests.suffixAnchored("le"), - "e", WildcardFieldTypeTests.suffixAnchored("e") ), terms @@ -111,7 +100,14 @@ public void testTokenizer() throws IOException { terms.add(charTermAttribute.toString()); } } - assertEquals(List.of(WildcardFieldTypeTests.prefixAnchored("a"), "a", WildcardFieldTypeTests.suffixAnchored("a")), terms); + assertEquals( + List.of( + WildcardFieldTypeTests.prefixAnchored("a"), + WildcardFieldTypeTests.suffixAnchored((char) 0 + "a"), + WildcardFieldTypeTests.suffixAnchored("a") + ), + terms + ); } public void testEnableDocValues() throws IOException { @@ -188,13 +184,8 @@ public void testNormalizer() throws IOException { List.of( WildcardFieldTypeTests.prefixAnchored("a"), WildcardFieldTypeTests.prefixAnchored("ab"), - "a", - "ab", "abc", - "b", - "bc", WildcardFieldTypeTests.suffixAnchored("bc"), - "c", WildcardFieldTypeTests.suffixAnchored("c") ), terms @@ -242,13 +233,8 @@ public void testNullValue() throws IOException { List.of( WildcardFieldTypeTests.prefixAnchored("u"), WildcardFieldTypeTests.prefixAnchored("ur"), - "u", - "ur", "uri", - "r", - "ri", WildcardFieldTypeTests.suffixAnchored("ri"), - "i", WildcardFieldTypeTests.suffixAnchored("i") ), terms @@ -281,16 +267,9 @@ public void testDefaults() throws Exception { List.of( WildcardFieldTypeTests.prefixAnchored("1"), WildcardFieldTypeTests.prefixAnchored("12"), - "1", - "12", "123", - "2", - "23", "234", - "3", - "34", WildcardFieldTypeTests.suffixAnchored("34"), - "4", WildcardFieldTypeTests.suffixAnchored("4") ), terms diff --git a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java index 1a813495e9033..851e791660d82 100644 --- a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java @@ -20,11 +20,19 @@ public class WildcardFieldTypeTests extends FieldTypeTestCase { static String prefixAnchored(String val) { - return (char) 0 + val; + String ret = (char) 0 + val; + if (ret.length() < WildcardFieldMapper.NGRAM_SIZE) { + ret = prefixAnchored(ret); + } + return ret; } static String suffixAnchored(String val) { - return val + (char) 0; + String ret = val + (char) 0; + if (ret.length() < WildcardFieldMapper.NGRAM_SIZE) { + ret = suffixAnchored(ret); + } + return ret; } public void testTermQuery() { @@ -104,13 +112,14 @@ public void testEscapedWildcardQuery() { ft.wildcardQuery("\\**\\*", null, null) ); - 
assertEquals(new WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "\\*"), ft.wildcardQuery("\\*", null, null)); - - expectedTerms.remove(suffixAnchored("*")); + expectedTerms.add(prefixAnchored("*" + (char) 0)); builder = new BooleanQuery.Builder(); for (String term : expectedTerms) { builder.add(new TermQuery(new Term("field", term)), BooleanClause.Occur.FILTER); } + assertEquals(new WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "\\*"), ft.wildcardQuery("\\*", null, null)); + builder = new BooleanQuery.Builder(); + builder.add(new TermQuery(new Term("field", prefixAnchored("*"))), BooleanClause.Occur.FILTER); assertEquals(new WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "\\**"), ft.wildcardQuery("\\**", null, null)); } @@ -119,7 +128,6 @@ public void testMultipleWildcardsInQuery() { MappedFieldType ft = new WildcardFieldMapper.WildcardFieldType("field"); Set expectedTerms = new HashSet<>(); expectedTerms.add(prefixAnchored("a")); - expectedTerms.add("cd"); expectedTerms.add("efg"); expectedTerms.add(suffixAnchored("h")); BooleanQuery.Builder builder = new BooleanQuery.Builder(); @@ -153,27 +161,27 @@ public void testRegexpQuery() { assertTrue(actualMatchingQuery.getSecondPhaseMatcher().test("foo_apple_foo")); assertFalse(actualMatchingQuery.getSecondPhaseMatcher().test("foo_apply_foo")); - pattern = "ab(zz|cd|ef.*)(hi|jk)"; + pattern = "abc(zzz|def|ghi.*)(jkl|mno)"; builder = new BooleanQuery.Builder(); - builder.add(new TermQuery(new Term("field", "ab")), BooleanClause.Occur.FILTER); + builder.add(new TermQuery(new Term("field", "abc")), BooleanClause.Occur.FILTER); builder.add( - new BooleanQuery.Builder().add(new TermQuery(new Term("field", "zz")), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("field", "cd")), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("field", "ef")), BooleanClause.Occur.SHOULD) + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "zzz")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("field", "def")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("field", "ghi")), BooleanClause.Occur.SHOULD) .build(), BooleanClause.Occur.FILTER ); builder.add( - new BooleanQuery.Builder().add(new TermQuery(new Term("field", "hi")), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("field", "jk")), BooleanClause.Occur.SHOULD) + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "jkl")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("field", "mno")), BooleanClause.Occur.SHOULD) .build(), BooleanClause.Occur.FILTER ); actual = ft.regexpQuery(pattern, 0, 0, 1000, null, null); assertEquals(new WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "/" + pattern + "/"), actual); actualMatchingQuery = (WildcardFieldMapper.WildcardMatchingQuery) actual; - assertTrue(actualMatchingQuery.getSecondPhaseMatcher().test("abcdjk")); - assertTrue(actualMatchingQuery.getSecondPhaseMatcher().test("abefqwertyhi")); + assertTrue(actualMatchingQuery.getSecondPhaseMatcher().test("abcdefmno")); + assertTrue(actualMatchingQuery.getSecondPhaseMatcher().test("abcghiqwertyjkl")); } public void testWildcardMatchAll() { From 43e589a0cad13034d06da528ac76c1b9a341ac4a Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Wed, 19 Feb 2025 18:20:37 -0500 Subject: [PATCH 010/550] Explicitly disable FeatureFlag in MetadataCreateIndexServiceTests.testCreateIndexWithContextDisabled (#17384) * Segregate tests in MetadataCreateIndexServiceTests that rely on 
FeatureFlags being enabled Signed-off-by: Craig Perkins * Remove duplicate methods Signed-off-by: Craig Perkins * Remove unnecessary license Signed-off-by: Craig Perkins * Explicitly disable FF Signed-off-by: Craig Perkins * Remove multiple try blocks Signed-off-by: Craig Perkins * Clean up FF in tearDown Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- .../MetadataCreateIndexServiceTests.java | 264 ++++++++---------- 1 file changed, 124 insertions(+), 140 deletions(-) diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 0bb9ec28a1efc..cc35426ee15b8 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -243,6 +243,14 @@ public void setupCreateIndexRequestAndAliasValidator() { ); } + @After + public void tearDown() throws Exception { + super.tearDown(); + // clear any FeatureFlags needed for individual tests + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + clusterSettings = null; + } + private ClusterState createClusterState(String name, int numShards, int numReplicas, Settings settings) { int numRoutingShards = settings.getAsInt(IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey(), numShards); Metadata.Builder metaBuilder = Metadata.builder(); @@ -2304,6 +2312,8 @@ public void testIndexCreationWithIndexStoreTypeRemoteStoreThrowsException() { } public void testCreateIndexWithContextDisabled() throws Exception { + // Explicitly disable the FF + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false).build()); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); withTemporaryClusterService((clusterService, threadPool) -> { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( @@ -2337,42 +2347,35 @@ public void testCreateIndexWithContextDisabled() throws Exception { public void testCreateIndexWithContextAbsent() throws Exception { FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); - try { - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); - withTemporaryClusterService((clusterService, threadPool) -> { - MetadataCreateIndexService checkerService = new MetadataCreateIndexService( - Settings.EMPTY, - clusterService, - indicesServices, - null, - null, - createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), - mock(Environment.class), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - threadPool, - null, - new SystemIndices(Collections.emptyMap()), - false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE, - repositoriesServiceSupplier - ); - CountDownLatch counter = new CountDownLatch(1); - InvalidIndexContextException exception = expectThrows( - InvalidIndexContextException.class, - () -> checkerService.validateContext(request) - ); - assertTrue( - "Invalid exception message." 
+ exception.getMessage(), - exception.getMessage().contains("index specifies a context which is not loaded on the cluster.") - ); - }); - } finally { - // Disable so that other tests which are not dependent on this are not impacted. - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false).build() + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); + withTemporaryClusterService((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + indicesServices, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + mock(Environment.class), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); - } + CountDownLatch counter = new CountDownLatch(1); + InvalidIndexContextException exception = expectThrows( + InvalidIndexContextException.class, + () -> checkerService.validateContext(request) + ); + assertTrue( + "Invalid exception message." + exception.getMessage(), + exception.getMessage().contains("index specifies a context which is not loaded on the cluster.") + ); + }); } public void testApplyContext() throws IOException { @@ -2420,63 +2423,56 @@ public void testApplyContext() throws IOException { } String contextName = randomAlphaOfLength(5); - try { - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(contextName)); - withTemporaryClusterService((clusterService, threadPool) -> { - MetadataCreateIndexService checkerService = new MetadataCreateIndexService( - Settings.EMPTY, - clusterService, - indicesServices, - null, - null, - createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), - mock(Environment.class), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - threadPool, - null, - new SystemIndices(Collections.emptyMap()), - false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE, - repositoriesServiceSupplier - ); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(contextName)); + withTemporaryClusterService((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + indicesServices, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + mock(Environment.class), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier + ); - ClusterState mockState = mock(ClusterState.class); - Metadata metadata = mock(Metadata.class); - - when(mockState.metadata()).thenReturn(metadata); - when(metadata.systemTemplatesLookup()).thenReturn(Map.of(contextName, new TreeMap<>() { - { - put(1L, contextName); - } - })); - when(metadata.componentTemplates()).thenReturn(Map.of(contextName, componentTemplate.get())); - - try { - Template template = checkerService.applyContext(request, 
mockState, allMappings, settingsBuilder); - assertEquals(componentTemplate.get().template(), template); - - assertEquals(2, allMappings.size()); - assertEquals(mappings, allMappings.get(0)); - assertEquals( - MapperService.parseMapping(NamedXContentRegistry.EMPTY, componentTemplate.get().template().mappings().toString()), - allMappings.get(1) - ); - - assertEquals("60s", settingsBuilder.get(INDEX_REFRESH_INTERVAL_SETTING.getKey())); - assertEquals("log_byte_size", settingsBuilder.get(INDEX_MERGE_POLICY.getKey())); - assertEquals("best_compression", settingsBuilder.get(EngineConfig.INDEX_CODEC_SETTING.getKey())); - assertEquals("false", settingsBuilder.get(INDEX_SOFT_DELETES_SETTING.getKey())); - } catch (IOException ex) { - throw new AssertionError(ex); + ClusterState mockState = mock(ClusterState.class); + Metadata metadata = mock(Metadata.class); + + when(mockState.metadata()).thenReturn(metadata); + when(metadata.systemTemplatesLookup()).thenReturn(Map.of(contextName, new TreeMap<>() { + { + put(1L, contextName); } - }); - } finally { - // Disable so that other tests which are not dependent on this are not impacted. - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false).build() - ); - } + })); + when(metadata.componentTemplates()).thenReturn(Map.of(contextName, componentTemplate.get())); + + try { + Template template = checkerService.applyContext(request, mockState, allMappings, settingsBuilder); + assertEquals(componentTemplate.get().template(), template); + + assertEquals(2, allMappings.size()); + assertEquals(mappings, allMappings.get(0)); + assertEquals( + MapperService.parseMapping(NamedXContentRegistry.EMPTY, componentTemplate.get().template().mappings().toString()), + allMappings.get(1) + ); + + assertEquals("60s", settingsBuilder.get(INDEX_REFRESH_INTERVAL_SETTING.getKey())); + assertEquals("log_byte_size", settingsBuilder.get(INDEX_MERGE_POLICY.getKey())); + assertEquals("best_compression", settingsBuilder.get(EngineConfig.INDEX_CODEC_SETTING.getKey())); + assertEquals("false", settingsBuilder.get(INDEX_SOFT_DELETES_SETTING.getKey())); + } catch (IOException ex) { + throw new AssertionError(ex); + } + }); } public void testApplyContextWithSettingsOverlap() throws IOException { @@ -2508,55 +2504,48 @@ public void testApplyContextWithSettingsOverlap() throws IOException { } String contextName = randomAlphaOfLength(5); - try { - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(contextName)); - withTemporaryClusterService((clusterService, threadPool) -> { - MetadataCreateIndexService checkerService = new MetadataCreateIndexService( - Settings.EMPTY, - clusterService, - indicesServices, - null, - null, - createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), - mock(Environment.class), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - threadPool, - null, - new SystemIndices(Collections.emptyMap()), - false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE, - repositoriesServiceSupplier - ); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(contextName)); + withTemporaryClusterService((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + indicesServices, + null, + null, + createTestShardLimitService(randomIntBetween(1, 
1000), false, clusterService), + mock(Environment.class), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier + ); - ClusterState mockState = mock(ClusterState.class); - Metadata metadata = mock(Metadata.class); + ClusterState mockState = mock(ClusterState.class); + Metadata metadata = mock(Metadata.class); - when(mockState.metadata()).thenReturn(metadata); - when(metadata.systemTemplatesLookup()).thenReturn(Map.of(contextName, new TreeMap<>() { - { - put(1L, contextName); - } - })); - when(metadata.componentTemplates()).thenReturn(Map.of(contextName, componentTemplate.get())); + when(mockState.metadata()).thenReturn(metadata); + when(metadata.systemTemplatesLookup()).thenReturn(Map.of(contextName, new TreeMap<>() { + { + put(1L, contextName); + } + })); + when(metadata.componentTemplates()).thenReturn(Map.of(contextName, componentTemplate.get())); - ValidationException validationException = expectThrows( - ValidationException.class, - () -> checkerService.applyContext(request, mockState, List.of(), settingsBuilder) - ); - assertEquals(1, validationException.validationErrors().size()); - assertTrue( - "Invalid exception message: " + validationException.getMessage(), - validationException.getMessage() - .contains("Cannot apply context template as user provide settings have overlap with the included context template") - ); - }); - } finally { - // Disable so that other tests which are not dependent on this are not impacted. - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false).build() + ValidationException validationException = expectThrows( + ValidationException.class, + () -> checkerService.applyContext(request, mockState, List.of(), settingsBuilder) ); - } + assertEquals(1, validationException.validationErrors().size()); + assertTrue( + "Invalid exception message: " + validationException.getMessage(), + validationException.getMessage() + .contains("Cannot apply context template as user provide settings have overlap with the included context template") + ); + }); } private IndexTemplateMetadata addMatchingTemplate(Consumer configurator) { @@ -2632,9 +2621,4 @@ private DiscoveryNode getRemoteNode() { ); } - @After - public void shutdown() throws Exception { - clusterSettings = null; - } - } From e3a6ccadc942c64e83bd224031bc4d1c6ab14623 Mon Sep 17 00:00:00 2001 From: Asim M Date: Thu, 20 Feb 2025 00:41:50 +0000 Subject: [PATCH 011/550] Introduce `execution_hint` for Cardinality aggregation (#17312) --------- Signed-off-by: Siddharth Rayabharam Signed-off-by: Asim Mahmood Signed-off-by: Asim M Co-authored-by: Siddharth Rayabharam Co-authored-by: Craig Perkins --- CHANGELOG-3.0.md | 1 + .../CardinalityAggregationBuilder.java | 39 +++-- .../metrics/CardinalityAggregator.java | 20 ++- .../metrics/CardinalityAggregatorFactory.java | 36 ++++- .../CardinalityAggregatorSupplier.java | 3 +- .../metrics/CardinalityAggregatorTests.java | 137 ++++++++++++++++++ .../aggregations/AggregatorTestCase.java | 8 +- 7 files changed, 222 insertions(+), 22 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index bc5e63dbdf8ce..67fef874777e2 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Views, simplify 
data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) - Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) - Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) +- Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index a7516a6fd6b24..f77bbfbd48461 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -68,6 +68,7 @@ public final class CardinalityAggregationBuilder extends ValuesSourceAggregation private static final ParseField REHASH = new ParseField("rehash").withAllDeprecated("no replacement - values will always be rehashed"); public static final ParseField PRECISION_THRESHOLD_FIELD = new ParseField("precision_threshold"); + public static final ParseField EXECUTION_HINT_FIELD = new ParseField("execution_hint"); public static final ObjectParser PARSER = ObjectParser.fromBuilder( NAME, @@ -76,6 +77,7 @@ public final class CardinalityAggregationBuilder extends ValuesSourceAggregation static { ValuesSourceAggregationBuilder.declareFields(PARSER, true, false, false); PARSER.declareLong(CardinalityAggregationBuilder::precisionThreshold, CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD); + PARSER.declareString(CardinalityAggregationBuilder::executionHint, CardinalityAggregationBuilder.EXECUTION_HINT_FIELD); PARSER.declareLong((b, v) -> {/*ignore*/}, REHASH); } @@ -85,6 +87,8 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { private Long precisionThreshold = null; + private String executionHint = null; + public CardinalityAggregationBuilder(String name) { super(name); } @@ -96,6 +100,7 @@ public CardinalityAggregationBuilder( ) { super(clone, factoriesBuilder, metadata); this.precisionThreshold = clone.precisionThreshold; + this.executionHint = clone.executionHint; } @Override @@ -111,6 +116,9 @@ public CardinalityAggregationBuilder(StreamInput in) throws IOException { if (in.readBoolean()) { precisionThreshold = in.readLong(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + executionHint = in.readOptionalString(); + } } @Override @@ -125,6 +133,9 @@ protected void innerWriteTo(StreamOutput out) throws IOException { if (hasPrecisionThreshold) { out.writeLong(precisionThreshold); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalString(executionHint); + } } @Override @@ -146,13 +157,9 @@ public CardinalityAggregationBuilder precisionThreshold(long precisionThreshold) return this; } - /** - * Get the precision threshold. Higher values improve accuracy but also - * increase memory usage. Will return null if the - * precisionThreshold has not been set yet. 
- */ - public Long precisionThreshold() { - return precisionThreshold; + public CardinalityAggregationBuilder executionHint(String executionHint) { + this.executionHint = executionHint; + return this; } @Override @@ -162,7 +169,16 @@ protected CardinalityAggregatorFactory innerBuild( AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder ) throws IOException { - return new CardinalityAggregatorFactory(name, config, precisionThreshold, queryShardContext, parent, subFactoriesBuilder, metadata); + return new CardinalityAggregatorFactory( + name, + config, + precisionThreshold, + queryShardContext, + parent, + subFactoriesBuilder, + metadata, + executionHint + ); } @Override @@ -170,12 +186,15 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th if (precisionThreshold != null) { builder.field(PRECISION_THRESHOLD_FIELD.getPreferredName(), precisionThreshold); } + if (executionHint != null) { + builder.field(EXECUTION_HINT_FIELD.getPreferredName(), executionHint); + } return builder; } @Override public int hashCode() { - return Objects.hash(super.hashCode(), precisionThreshold); + return Objects.hash(super.hashCode(), precisionThreshold, executionHint); } @Override @@ -184,7 +203,7 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; CardinalityAggregationBuilder other = (CardinalityAggregationBuilder) obj; - return Objects.equals(precisionThreshold, other.precisionThreshold); + return Objects.equals(precisionThreshold, other.precisionThreshold) && Objects.equals(executionHint, other.executionHint); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java index d578c37af8818..f95dbf67fe8af 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java @@ -89,6 +89,7 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue private static final Logger logger = LogManager.getLogger(CardinalityAggregator.class); + private final CardinalityAggregatorFactory.ExecutionMode executionMode; private final int precision; private final ValuesSource valuesSource; @@ -113,7 +114,8 @@ public CardinalityAggregator( int precision, SearchContext context, Aggregator parent, - Map metadata + Map metadata, + CardinalityAggregatorFactory.ExecutionMode executionMode ) throws IOException { super(name, context, parent, metadata); // TODO: Stop using nulls here @@ -121,6 +123,7 @@ public CardinalityAggregator( this.precision = precision; this.counts = valuesSource == null ? 
null : new HyperLogLogPlusPlus(precision, context.bigArrays(), 1); this.valuesSourceConfig = valuesSourceConfig; + this.executionMode = executionMode; } @Override @@ -144,14 +147,17 @@ private Collector pickCollector(LeafReaderContext ctx) throws IOException { } Collector collector = null; - if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals) { - ValuesSource.Bytes.WithOrdinals source = (ValuesSource.Bytes.WithOrdinals) valuesSource; + if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals source) { final SortedSetDocValues ordinalValues = source.ordinalsValues(ctx); final long maxOrd = ordinalValues.getValueCount(); if (maxOrd == 0) { emptyCollectorsUsed++; return new EmptyCollector(); - } else { + } else if (executionMode == CardinalityAggregatorFactory.ExecutionMode.ORDINALS) { // Force OrdinalsCollector + ordinalsCollectorsUsed++; + collector = new OrdinalsCollector(counts, ordinalValues, context.bigArrays()); + } else if (executionMode == null) { + // no hint provided, fall back to heuristics final long ordinalsMemoryUsage = OrdinalsCollector.memoryOverhead(maxOrd); final long countsMemoryUsage = HyperLogLogPlusPlus.memoryUsage(precision); // only use ordinals if they don't increase memory usage by more than 25% @@ -164,7 +170,7 @@ private Collector pickCollector(LeafReaderContext ctx) throws IOException { } } - if (collector == null) { // not able to build an OrdinalsCollector + if (collector == null) { // not able to build an OrdinalsCollector, or hint is direct stringHashingCollectorsUsed++; collector = new DirectCollector(counts, MurmurHash3Values.hash(valuesSource.bytesValues(ctx))); } @@ -480,7 +486,7 @@ public void close() { * * @opensearch.internal */ - private static class DirectCollector extends Collector { + static class DirectCollector extends Collector { private final MurmurHash3Values hashes; private final HyperLogLogPlusPlus counts; @@ -517,7 +523,7 @@ public void close() { * * @opensearch.internal */ - private static class OrdinalsCollector extends Collector { + static class OrdinalsCollector extends Collector { private static final long SHALLOW_FIXEDBITSET_SIZE = RamUsageEstimator.shallowSizeOfInstance(FixedBitSet.class); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java index 980667b45324e..3d82386d12e57 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java @@ -44,6 +44,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Locale; import java.util.Map; /** @@ -53,6 +54,33 @@ */ class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory { + /** + * Execution mode for cardinality agg + * + * @opensearch.internal + */ + public enum ExecutionMode { + DIRECT, + ORDINALS; + + ExecutionMode() {} + + public static ExecutionMode fromString(String value) { + try { + return ExecutionMode.valueOf(value.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Unknown execution_hint: [" + value + "], expected any of [direct, ordinals]"); + } + } + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } + } + + private final ExecutionMode executionMode; + private final Long precisionThreshold; 
CardinalityAggregatorFactory( @@ -62,10 +90,12 @@ class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory { QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metadata + Map metadata, + String executionHint ) throws IOException { super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata); this.precisionThreshold = precisionThreshold; + this.executionMode = executionHint == null ? null : ExecutionMode.fromString(executionHint); } public static void registerAggregators(ValuesSourceRegistry.Builder builder) { @@ -74,7 +104,7 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - return new CardinalityAggregator(name, config, precision(), searchContext, parent, metadata); + return new CardinalityAggregator(name, config, precision(), searchContext, parent, metadata, executionMode); } @Override @@ -86,7 +116,7 @@ protected Aggregator doCreateInternal( ) throws IOException { return queryShardContext.getValuesSourceRegistry() .getAggregator(CardinalityAggregationBuilder.REGISTRY_KEY, config) - .build(name, config, precision(), searchContext, parent, metadata); + .build(name, config, precision(), searchContext, parent, metadata, executionMode); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorSupplier.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorSupplier.java index d5cb0242762fd..42426697e7629 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorSupplier.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorSupplier.java @@ -51,6 +51,7 @@ Aggregator build( int precision, SearchContext context, Aggregator parent, - Map metadata + Map metadata, + CardinalityAggregatorFactory.ExecutionMode executionMode ) throws IOException; } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java index 060e06f7336b3..ca65c888f3363 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java @@ -37,6 +37,7 @@ import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.KeywordField; import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; @@ -66,6 +67,7 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.InternalAggregation; +import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.aggregations.support.AggregationInspectionHelper; @@ -497,4 +499,139 @@ protected CountingAggregator createCountingAggregator( ) ); } + + private void 
testAggregationExecutionHint( + AggregationBuilder aggregationBuilder, + Query query, + CheckedConsumer buildIndex, + Consumer verify, + Consumer verifyCollector, + MappedFieldType fieldType + ) throws IOException { + try (Directory directory = newDirectory()) { + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + buildIndex.accept(indexWriter); + indexWriter.close(); + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + CountingAggregator aggregator = new CountingAggregator( + new AtomicInteger(), + createAggregator(aggregationBuilder, indexSearcher, fieldType) + ); + aggregator.preCollection(); + indexSearcher.search(query, aggregator); + aggregator.postCollection(); + + MultiBucketConsumerService.MultiBucketConsumer reduceBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( + Integer.MAX_VALUE, + new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST) + ); + InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forFinalReduction( + aggregator.context().bigArrays(), + getMockScriptService(), + reduceBucketConsumer, + PipelineAggregator.PipelineTree.EMPTY + ); + InternalCardinality topLevel = (InternalCardinality) aggregator.buildTopLevel(); + InternalCardinality card = (InternalCardinality) topLevel.reduce(Collections.singletonList(topLevel), context); + doAssertReducedMultiBucketConsumer(card, reduceBucketConsumer); + + verify.accept(card); + verifyCollector.accept(aggregator.getSelectedCollector()); + } + } + } + + public void testInvalidExecutionHint() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.LONG); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("number") + .executionHint("invalid"); + assertThrows(IllegalArgumentException.class, () -> testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 8))); + iw.addDocument(singleton(new NumericDocValuesField("number", 9))); + }, card -> { + assertEquals(3, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.DirectCollector); }, fieldType)); + } + + public void testNoExecutionHintWithNumericDocValues() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.LONG); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("number"); + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 8))); + iw.addDocument(singleton(new NumericDocValuesField("number", 9))); + }, card -> { + assertEquals(3, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.DirectCollector); }, fieldType); + } + + public void testDirectExecutionHintWithNumericDocValues() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.LONG); + final 
CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("number") + .executionHint("direct"); + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 8))); + iw.addDocument(singleton(new NumericDocValuesField("number", 9))); + }, card -> { + assertEquals(3, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.DirectCollector); }, fieldType); + } + + public void testOrdinalsExecutionHintWithNumericDocValues() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.LONG); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("number") + .executionHint("ordinals"); + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 8))); + iw.addDocument(singleton(new NumericDocValuesField("number", 9))); + }, card -> { + assertEquals(3, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.DirectCollector); }, fieldType); + } + + public void testNoExecutionHintWithByteValues() throws IOException { + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("field"); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("field"); + + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedDocValuesField("field", new BytesRef()))); + }, card -> { + assertEquals(1, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.OrdinalsCollector); }, fieldType); + } + + public void testDirectExecutionHintWithByteValues() throws IOException { + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("field"); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("field") + .executionHint("direct"); + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedDocValuesField("field", new BytesRef()))); + }, card -> { + assertEquals(1, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.DirectCollector); }, fieldType); + } + + public void testOrdinalsExecutionHintWithByteValues() throws IOException { + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("field"); + final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name").field("field") + .executionHint("ordinals"); + testAggregationExecutionHint(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedDocValuesField("field", new BytesRef()))); + }, card -> { + assertEquals(1, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); + }, collector -> { assertTrue(collector instanceof CardinalityAggregator.OrdinalsCollector); }, fieldType); 
+ } } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 78e3d4f50a0d5..eba1769ad882d 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -1331,6 +1331,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { protected static class CountingAggregator extends Aggregator { private final AtomicInteger collectCounter; public final Aggregator delegate; + private LeafBucketCollector selectedCollector; public CountingAggregator(AtomicInteger collectCounter, Aggregator delegate) { this.collectCounter = collectCounter; @@ -1341,6 +1342,10 @@ public AtomicInteger getCollectCount() { return collectCounter; } + public LeafBucketCollector getSelectedCollector() { + return selectedCollector; + } + @Override public void close() { delegate.close(); @@ -1381,7 +1386,8 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOExce return new LeafBucketCollector() { @Override public void collect(int doc, long bucket) throws IOException { - delegate.getLeafCollector(ctx).collect(doc, bucket); + selectedCollector = delegate.getLeafCollector(ctx); + selectedCollector.collect(doc, bucket); collectCounter.incrementAndGet(); } }; From 1b7c0552c3f43461b91a77ff6e937b6f27705a51 Mon Sep 17 00:00:00 2001 From: Owais Kazi Date: Wed, 19 Feb 2025 17:42:09 -0800 Subject: [PATCH 012/550] Bump jetty version in hdfs-fixture to 9.4.57.v20241219 (#17395) Signed-off-by: Owais --- CHANGELOG-3.0.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 67fef874777e2..39b7c758d5ac7 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) - Bump Apache HttpCore5/HttpClient5 dependencies from 5.2.5/5.3.1 to 5.3.1/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757)) +- Bumps `jetty` version from 9.4.55.v20240627 to 9.4.57.v20241219 ### Changed - Changed locale provider from COMPAT to CLDR ([#14345](https://github.com/opensearch-project/OpenSearch/pull/14345)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 88add6d359e54..b3311a71c3555 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.java' group = 'hdfs' versions << [ - 'jetty': '9.4.55.v20240627' + 'jetty': '9.4.57.v20241219' ] dependencies { From 10fa39d11dc65fdc412a075deaa06205371c664f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Feb 2025 07:36:07 -0500 Subject: [PATCH 013/550] Bump me.champeau.gradle.japicmp from 0.4.5 to 0.4.6 in /server (#17375) * Bump me.champeau.gradle.japicmp from 0.4.5 to 0.4.6 in /server Bumps me.champeau.gradle.japicmp from 0.4.5 to 0.4.6. --- updated-dependencies: - dependency-name: me.champeau.gradle.japicmp dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + server/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa6e7bce8655d..03f2b019d514a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.ant:ant` from 1.10.14 to 1.10.15 ([#17288](https://github.com/opensearch-project/OpenSearch/pull/17288)) - Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) - Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) +- Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) diff --git a/server/build.gradle b/server/build.gradle index 74a9d1a59922d..e1512fb4b2c58 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -36,7 +36,7 @@ plugins { id('opensearch.publish') id('opensearch.internal-cluster-test') id('opensearch.optional-dependencies') - id('me.champeau.gradle.japicmp') version '0.4.5' + id('me.champeau.gradle.japicmp') version '0.4.6' } publishing { From bad652bd84ec5dca9ab9333efba2e9729babdd79 Mon Sep 17 00:00:00 2001 From: kkewwei Date: Thu, 20 Feb 2025 22:50:27 +0800 Subject: [PATCH 014/550] Fix Flaky Test testPendingTasksWithClusterNotRecoveredBlock (#17397) Signed-off-by: kkewwei Signed-off-by: kkewwei --- .../action/admin/cluster/tasks/PendingTasksBlocksIT.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java index 2be4acd16671f..337403fc734a4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java @@ -91,11 +91,11 @@ public void testPendingTasksWithClusterNotRecoveredBlock() throws Exception { } // restart the cluster but prevent it from performing state recovery - final int nodeCount = client().admin().cluster().prepareNodesInfo("data:true", "cluster_manager:true").get().getNodes().size(); + final int dataNodesCount = client().admin().cluster().prepareNodesInfo("data:true").get().getNodes().size(); internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { @Override public Settings onNodeStopped(String nodeName) { - return Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), nodeCount + 1).build(); + return Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), dataNodesCount + 1).build(); } @Override From f652abc00c233b5367dbc5a9b3621fc612418cb3 Mon Sep 17 00:00:00 2001 From: kkewwei Date: Fri, 21 Feb 2025 01:04:21 +0800 Subject: [PATCH 015/550] Fix Flaky Test ShuffleForcedMergePolicyTests.testDiagnostics (#17392) Signed-off-by: kkewwei Signed-off-by: kkewwei --- .../lucene/index/ShuffleForcedMergePolicyTests.java | 9 +++++++-- 1 file 
changed, 7 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java b/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java index 46e5d4a76cd9d..58fdb2c503b7d 100644 --- a/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java +++ b/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java @@ -43,6 +43,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.store.Directory; @@ -59,13 +60,17 @@ public class ShuffleForcedMergePolicyTests extends BaseMergePolicyTestCase { public void testDiagnostics() throws IOException { try (Directory dir = newDirectory()) { IndexWriterConfig iwc = newIndexWriterConfig().setMaxFullFlushMergeWaitMillis(0); - MergePolicy mp = new ShuffleForcedMergePolicy(newTieredMergePolicy()); + TieredMergePolicy tieredMergePolicy = newTieredMergePolicy(); + // ensure only trigger one Merge when flushing, and there are remaining segments to be force merged + tieredMergePolicy.setSegmentsPerTier(8); + tieredMergePolicy.setMaxMergeAtOnce(8); + MergePolicy mp = new ShuffleForcedMergePolicy(tieredMergePolicy); iwc.setMergePolicy(mp); boolean sorted = random().nextBoolean(); if (sorted) { iwc.setIndexSort(new Sort(new SortField("sort", SortField.Type.INT))); } - int numDocs = atLeast(100); + int numDocs = 90 + random().nextInt(10); try (IndexWriter writer = new IndexWriter(dir, iwc)) { for (int i = 0; i < numDocs; i++) { From 636dea48ec51aa86400ae2b0991f46bb2086d8a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Feb 2025 13:35:13 -0500 Subject: [PATCH 016/550] Bump net.minidev:json-smart from 2.5.1 to 2.5.2 in /test/fixtures/hdfs-fixture (#17378) * Bump net.minidev:json-smart in /test/fixtures/hdfs-fixture Bumps [net.minidev:json-smart](https://github.com/netplex/json-smart-v2) from 2.5.1 to 2.5.2. - [Release notes](https://github.com/netplex/json-smart-v2/releases) - [Commits](https://github.com/netplex/json-smart-v2/compare/2.5.1...2.5.2) --- updated-dependencies: - dependency-name: net.minidev:json-smart dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 03f2b019d514a..ab4138c452894 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) - Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) - Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) +- Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index b3311a71c3555..d69ddcbd1a07c 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -65,7 +65,7 @@ dependencies { api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - api 'net.minidev:json-smart:2.5.1' + api 'net.minidev:json-smart:2.5.2' api "org.mockito:mockito-core:${versions.mockito}" api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" From abe2333e5315ac6482d61477de22921895472d8f Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Thu, 20 Feb 2025 13:39:07 -0800 Subject: [PATCH 017/550] Arrow Flight Server bootstrap logic (#16962) * Arrow Flight Server bootstrap logic * new plugin for StreamManager implementation * integration with server module * support for SslContext in Flight server and client * ClientManager for creating a pool of flight clients for data nodes * custom event loop group and thread pool for server and client channel Signed-off-by: Rishabh Maurya * interim changes - integration with Auxiliary Transport Signed-off-by: Rishabh Maurya * changes to use grpc-netty-shaded Signed-off-by: Rishabh Maurya * Update javadoc Signed-off-by: Rishabh Maurya * fix the shaded dependencies Signed-off-by: Rishabh Maurya * Move arrow-flight-rpc from module to plugin Signed-off-by: Rishabh Maurya * remove unnecessary imports Signed-off-by: Rishabh Maurya * rebase fixes Signed-off-by: Rishabh Maurya * Fix permissions and other runtime issues Signed-off-by: Rishabh Maurya * Remove StreamManagerWrapper and Node.java changes from PR Signed-off-by: Rishabh Maurya * Fix permissions for test Signed-off-by: Rishabh Maurya * remove testGetFlightClientLocationExecutionError as thread interruption was causing client close failure Signed-off-by: Rishabh Maurya * Fix the issue with single node ClientManager Signed-off-by: Rishabh Maurya * Fix flight server integ test on unix machine Signed-off-by: Rishabh Maurya * suppress JSM removal warning Signed-off-by: Rishabh Maurya * Fix security policy and FlightClientManagerTests Signed-off-by: Rishabh Maurya * 
remove StreamManagerWrapper from the PR Signed-off-by: Rishabh Maurya * Set multi-release in manifest while shadowing arrow-memory-shaded Signed-off-by: Rishabh Maurya * Disable jacocoReport for shaded projects Signed-off-by: Rishabh Maurya * Remove multi version classes from arrow-memory-shaded Signed-off-by: Rishabh Maurya * Address the PR comments Signed-off-by: Rishabh Maurya * Move the arrow-memory-shaded and flight within flight-rpc plugin Signed-off-by: Rishabh Maurya * Move the arrow-memory-shaded and flight within flight-rpc plugin Signed-off-by: Rishabh Maurya * Detach SPI from Apache Arrow (not needed at the moment), drop all shaded libs (not needed at the moment) Signed-off-by: Andriy Redko * Rebase and other minor refactoring Signed-off-by: Rishabh Maurya * Address PR comments - majorly move away from grpc-netty-shaded Signed-off-by: Rishabh Maurya * remove arrow flight, client from codecov as the package is non opensearch is just for overriding purpose Signed-off-by: Rishabh Maurya * change compileOnly to implementation dep for arrow-spi Signed-off-by: Rishabh Maurya * Rebase from main and fixes related to netty version bump Signed-off-by: Rishabh Maurya * Simplify cloning and overriding logic for FlightServer and FlightClient Signed-off-by: Rishabh Maurya * Only clone FlightClient::Builder class Signed-off-by: Andriy Redko * Only clone FlightServer::Builder class Signed-off-by: Andriy Redko * Update min supported version to 3.0.0 Co-authored-by: Andriy Redko Signed-off-by: Rishabh Maurya * Fix java security permission issue Signed-off-by: Rishabh Maurya * Address PR comments Signed-off-by: Rishabh Maurya * Fix netty system properties Signed-off-by: Rishabh Maurya * Move flight service and other components of flight-rpc-plugin behind feature flag Signed-off-by: Rishabh Maurya * remove system property value set numDirectArenas Signed-off-by: Rishabh Maurya --------- Signed-off-by: Rishabh Maurya Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko Co-authored-by: Andriy Redko Co-authored-by: Andriy Redko --- CHANGELOG-3.0.md | 1 + codecov.yml | 1 + gradle/libs.versions.toml | 2 +- libs/arrow-spi/build.gradle | 70 +- .../licenses/arrow-format-17.0.0.jar.sha1 | 1 - .../arrow-memory-core-17.0.0.jar.sha1 | 1 - .../arrow-memory-netty-17.0.0.jar.sha1 | 1 - ...-memory-netty-buffer-patch-17.0.0.jar.sha1 | 1 - .../licenses/arrow-vector-17.0.0.jar.sha1 | 1 - .../licenses/jackson-databind-LICENSE.txt | 8 - .../licenses/jackson-databind-NOTICE.txt | 20 - .../licenses/netty-common-NOTICE.txt | 264 -- .../opensearch/arrow/spi/StreamManager.java | 4 +- .../opensearch/arrow/spi/StreamProducer.java | 12 +- .../opensearch/arrow/spi/StreamReader.java | 5 +- .../opensearch/arrow/spi/package-info.java | 2 +- plugins/arrow-flight-rpc/build.gradle | 301 +++ .../licenses/arrow-format-18.1.0.jar.sha1 | 1 + .../licenses/arrow-format-LICENSE.txt | 0 .../licenses/arrow-format-NOTICE.txt | 0 .../arrow-memory-core-18.1.0.jar.sha1 | 1 + .../licenses/arrow-memory-core-LICENSE.txt | 0 .../licenses/arrow-memory-core-NOTICE.txt | 0 .../arrow-memory-netty-18.1.0.jar.sha1 | 1 + .../licenses/arrow-memory-netty-LICENSE.txt | 0 .../licenses/arrow-memory-netty-NOTICE.txt | 0 ...-memory-netty-buffer-patch-18.1.0.jar.sha1 | 1 + ...rrow-memory-netty-buffer-patch-LICENSE.txt | 0 ...arrow-memory-netty-buffer-patch-NOTICE.txt | 0 .../licenses/arrow-vector-18.1.0.jar.sha1 | 1 + .../licenses/arrow-vector-LICENSE.txt | 0 .../licenses/arrow-vector-NOTICE.txt | 0 .../licenses/commons-codec-1.16.1.jar.sha1 | 0 
.../licenses/commons-codec-LICENSE.txt | 0 .../licenses/commons-codec-NOTICE.txt | 0 .../licenses/failureaccess-1.0.1.jar.sha1 | 1 + .../licenses/failureaccess-LICENSE.txt | 202 ++ .../licenses/failureaccess-NOTICE.txt | 0 .../licenses/flatbuffers-java-2.0.0.jar.sha1 | 0 .../licenses/flatbuffers-java-LICENSE.txt | 0 .../licenses/flatbuffers-java-NOTICE.txt | 0 .../licenses/flight-core-18.1.0.jar.sha1 | 1 + .../licenses/flight-core-LICENSE.txt | 2261 +++++++++++++++++ .../licenses/flight-core-NOTICE.txt | 84 + .../licenses/grpc-LICENSE.txt | 4 +- .../arrow-flight-rpc/licenses/grpc-NOTICE.txt | 62 + .../licenses/grpc-api-1.68.2.jar.sha1 | 1 + .../licenses/grpc-core-1.68.2.jar.sha1 | 1 + .../licenses/grpc-netty-1.68.2.jar.sha1 | 1 + .../licenses/grpc-protobuf-1.68.2.jar.sha1 | 1 + .../grpc-protobuf-lite-1.68.2.jar.sha1 | 1 + .../licenses/grpc-stub-1.68.2.jar.sha1 | 1 + .../licenses/guava-33.3.1-jre.jar.sha1 | 1 + .../licenses/guava-LICENSE.txt | 202 ++ .../licenses/guava-NOTICE.txt | 0 .../arrow-flight-rpc/licenses/jackson-LICENSE | 0 .../arrow-flight-rpc/licenses/jackson-NOTICE | 0 .../jackson-annotations-2.18.2.jar.sha1 | 0 .../licenses/jackson-databind-2.18.2.jar.sha1 | 0 .../licenses/jsr305-3.0.2.jar.sha1 | 1 + .../licenses/jsr305-LICENSE.txt | 29 + .../licenses/jsr305-NOTICE.txt | 1 + .../licenses/netty-LICENSE.txt | 0 .../licenses/netty-NOTICE.txt | 0 .../netty-buffer-4.1.118.Final.jar.sha1 | 0 .../netty-codec-4.1.118.Final.jar.sha1 | 1 + .../netty-codec-http-4.1.118.Final.jar.sha1 | 1 + .../netty-codec-http2-4.1.118.Final.jar.sha1 | 1 + .../netty-common-4.1.118.Final.jar.sha1 | 0 .../netty-handler-4.1.118.Final.jar.sha1 | 1 + .../netty-resolver-4.1.118.Final.jar.sha1 | 1 + ...tty-tcnative-classes-2.0.66.Final.jar.sha1 | 1 + .../netty-transport-4.1.118.Final.jar.sha1 | 1 + ...sport-classes-epoll-4.1.118.Final.jar.sha1 | 1 + ...-native-unix-common-4.1.118.Final.jar.sha1 | 1 + .../licenses/parquet-arrow-1.13.1.jar.sha1 | 1 + .../licenses/parquet-arrow-LICENSE.txt | 218 ++ .../licenses/parquet-arrow-NOTICE.txt | 94 + .../licenses/perfmark-api-0.27.0.jar.sha1 | 1 + .../licenses/perfmark-api-LICENSE.txt | 201 ++ .../licenses/perfmark-api-NOTICE.txt | 41 + .../licenses/slf4j-api-1.7.36.jar.sha1 | 0 .../licenses/slf4j-api-LICENSE.txt | 0 .../licenses/slf4j-api-NOTICE.txt | 0 .../arrow/flight/ArrowFlightServerIT.java | 59 + .../apache/arrow/flight/OSFlightClient.java | 250 ++ .../apache/arrow/flight/OSFlightServer.java | 478 ++++ .../org/apache/arrow/flight/package-info.java | 13 + .../flight/api/FlightServerInfoAction.java | 65 + .../arrow/flight/api/NodeFlightInfo.java | 99 + .../flight/api/NodesFlightInfoAction.java | 29 + .../flight/api/NodesFlightInfoRequest.java | 73 + .../flight/api/NodesFlightInfoResponse.java | 111 + .../api/TransportNodesFlightInfoAction.java | 113 + .../arrow/flight/api/package-info.java | 12 + .../flight/bootstrap/FlightClientManager.java | 252 ++ .../arrow/flight/bootstrap/FlightService.java | 170 ++ .../flight/bootstrap/FlightStreamPlugin.java | 264 ++ .../flight/bootstrap/ServerComponents.java | 286 +++ .../arrow/flight/bootstrap/ServerConfig.java | 218 ++ .../arrow/flight/bootstrap/package-info.java | 12 + .../tls/DefaultSslContextProvider.java | 104 + .../bootstrap/tls/SslContextProvider.java | 35 + .../flight/bootstrap/tls/package-info.java | 12 + .../opensearch/arrow/flight/package-info.java | 12 + .../plugin-metadata/plugin-security.policy | 45 + .../arrow/flight/FlightStreamPluginTests.java | 104 + .../api/FlightServerInfoActionTests.java | 101 + 
.../arrow/flight/api/NodeFlightInfoTests.java | 160 ++ .../api/NodesFlightInfoRequestTests.java | 39 + .../api/NodesFlightInfoResponseTests.java | 241 ++ .../TransportNodesFlightInfoActionTests.java | 176 ++ .../bootstrap/FlightClientManagerTests.java | 384 +++ .../flight/bootstrap/FlightServiceTests.java | 160 ++ .../flight/bootstrap/ServerConfigTests.java | 80 + server/build.gradle | 1 + .../common/settings/FeatureFlagSettings.java | 3 +- .../opensearch/common/util/FeatureFlags.java | 6 +- .../DefaultSecureTransportParameters.java | 37 + .../SecureTransportSettingsProvider.java | 14 + .../plugins/StreamManagerPlugin.java | 27 + .../opensearch/test/OpenSearchTestCase.java | 8 +- 122 files changed, 7987 insertions(+), 386 deletions(-) delete mode 100644 libs/arrow-spi/licenses/arrow-format-17.0.0.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/arrow-memory-core-17.0.0.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/arrow-memory-netty-17.0.0.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-17.0.0.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/arrow-vector-17.0.0.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/jackson-databind-LICENSE.txt delete mode 100644 libs/arrow-spi/licenses/jackson-databind-NOTICE.txt delete mode 100644 libs/arrow-spi/licenses/netty-common-NOTICE.txt create mode 100644 plugins/arrow-flight-rpc/build.gradle create mode 100644 plugins/arrow-flight-rpc/licenses/arrow-format-18.1.0.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-format-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-format-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/arrow-memory-core-18.1.0.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-core-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-core-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/arrow-memory-netty-18.1.0.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-netty-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-netty-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-18.1.0.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-netty-buffer-patch-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-memory-netty-buffer-patch-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/arrow-vector-18.1.0.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-vector-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/arrow-vector-NOTICE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/commons-codec-1.16.1.jar.sha1 (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/commons-codec-LICENSE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/commons-codec-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/failureaccess-1.0.1.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/failureaccess-LICENSE.txt rename libs/arrow-spi/licenses/flatbuffers-java-NOTICE.txt => plugins/arrow-flight-rpc/licenses/failureaccess-NOTICE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/flatbuffers-java-2.0.0.jar.sha1 (100%) rename {libs/arrow-spi => 
plugins/arrow-flight-rpc}/licenses/flatbuffers-java-LICENSE.txt (100%) rename libs/arrow-spi/licenses/slf4j-api-NOTICE.txt => plugins/arrow-flight-rpc/licenses/flatbuffers-java-NOTICE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/flight-core-18.1.0.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/flight-core-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/flight-core-NOTICE.txt rename libs/arrow-spi/licenses/netty-common-LICENSE.txt => plugins/arrow-flight-rpc/licenses/grpc-LICENSE.txt (99%) create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-NOTICE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-api-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-core-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-netty-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-protobuf-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/grpc-stub-1.68.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/guava-33.3.1-jre.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/guava-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/guava-NOTICE.txt rename libs/arrow-spi/licenses/jackson-annotations-LICENSE.txt => plugins/arrow-flight-rpc/licenses/jackson-LICENSE (100%) rename libs/arrow-spi/licenses/jackson-annotations-NOTICE.txt => plugins/arrow-flight-rpc/licenses/jackson-NOTICE (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/jackson-annotations-2.18.2.jar.sha1 (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/jackson-databind-2.18.2.jar.sha1 (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/jsr305-3.0.2.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/jsr305-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/jsr305-NOTICE.txt rename libs/arrow-spi/licenses/netty-buffer-LICENSE.txt => plugins/arrow-flight-rpc/licenses/netty-LICENSE.txt (100%) rename libs/arrow-spi/licenses/netty-buffer-NOTICE.txt => plugins/arrow-flight-rpc/licenses/netty-NOTICE.txt (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/netty-buffer-4.1.118.Final.jar.sha1 (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/netty-codec-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-codec-http-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/netty-common-4.1.118.Final.jar.sha1 (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/netty-handler-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-resolver-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-tcnative-classes-2.0.66.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-transport-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/parquet-arrow-1.13.1.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/parquet-arrow-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/parquet-arrow-NOTICE.txt create mode 100644 
plugins/arrow-flight-rpc/licenses/perfmark-api-0.27.0.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/perfmark-api-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/perfmark-api-NOTICE.txt rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/slf4j-api-1.7.36.jar.sha1 (100%) rename {libs/arrow-spi => plugins/arrow-flight-rpc}/licenses/slf4j-api-LICENSE.txt (100%) create mode 100644 plugins/arrow-flight-rpc/licenses/slf4j-api-NOTICE.txt create mode 100644 plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightClient.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightServer.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/package-info.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerComponents.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerConfig.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/package-info.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/DefaultSslContextProvider.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/SslContextProvider.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/package-info.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/package-info.java create mode 100644 plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java create mode 100644 
plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/ServerConfigTests.java create mode 100644 server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 39b7c758d5ac7..fc2fcd361f497 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) - Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) +- Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/codecov.yml b/codecov.yml index dac8f30956846..e22af90bcdbe1 100644 --- a/codecov.yml +++ b/codecov.yml @@ -4,6 +4,7 @@ codecov: ignore: - "test" - "benchmarks" + - "plugins/arrow-flight-rpc/**/org/apache/arrow/flight/**" coverage: precision: 2 diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 8cd210bbcb65a..abdd87394b35c 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -82,7 +82,7 @@ opentelemetry = "1.46.0" opentelemetrysemconv = "1.29.0-alpha" # arrow dependencies -arrow = "17.0.0" +arrow = "18.1.0" flatbuffers = "2.0.0" [libraries] diff --git a/libs/arrow-spi/build.gradle b/libs/arrow-spi/build.gradle index d14b7e88cfb8c..90a4c162e428b 100644 --- a/libs/arrow-spi/build.gradle +++ b/libs/arrow-spi/build.gradle @@ -10,79 +10,11 @@ */ testingConventions.enabled = false + dependencies { api project(':libs:opensearch-core') - api "org.apache.arrow:arrow-vector:${versions.arrow}" - api "org.apache.arrow:arrow-format:${versions.arrow}" - api "org.apache.arrow:arrow-memory-core:${versions.arrow}" - runtimeOnly "org.apache.arrow:arrow-memory-netty-buffer-patch:${versions.arrow}" - runtimeOnly "org.apache.arrow:arrow-memory-netty:${versions.arrow}" - runtimeOnly "io.netty:netty-buffer:${versions.netty}" - runtimeOnly "io.netty:netty-common:${versions.netty}" - - runtimeOnly "com.google.flatbuffers:flatbuffers-java:${versions.flatbuffers}" - runtimeOnly "org.slf4j:slf4j-api:${versions.slf4j}" - runtimeOnly "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - - implementation "commons-codec:commons-codec:${versions.commonscodec}" } tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' } - -tasks.named('thirdPartyAudit').configure { - 
ignoreMissingClasses( - // Logging frameworks - 'org.apache.commons.logging.Log', - 'org.apache.commons.logging.LogFactory', - 'org.apache.log4j.Level', - 'org.apache.log4j.Logger', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', - - // Reactor BlockHound - 'reactor.blockhound.BlockHound$Builder', - 'reactor.blockhound.integration.BlockHoundIntegration' - ) - - ignoreViolations( - "io.netty.util.internal.PlatformDependent0", - "io.netty.util.internal.PlatformDependent0\$1", - "io.netty.util.internal.PlatformDependent0\$2", - "io.netty.util.internal.PlatformDependent0\$3", - "io.netty.util.internal.PlatformDependent0\$4", - "io.netty.util.internal.PlatformDependent0\$6", - "io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef", - "io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef", - "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields", - "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields", - "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields", - "io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode", - "io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField", - "io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess", - "io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess", - "io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess", - "io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerIndexField", - "io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerLimitField", - "org.apache.arrow.memory.ArrowBuf", - "org.apache.arrow.memory.util.ByteFunctionHelpers", - "org.apache.arrow.memory.util.MemoryUtil", - "org.apache.arrow.memory.util.MemoryUtil\$1", - "org.apache.arrow.memory.util.hash.MurmurHasher", - "org.apache.arrow.memory.util.hash.SimpleHasher", - "org.apache.arrow.vector.BaseFixedWidthVector", - "org.apache.arrow.vector.BitVectorHelper", - "org.apache.arrow.vector.Decimal256Vector", - "org.apache.arrow.vector.DecimalVector", - "org.apache.arrow.vector.util.DecimalUtility", - "org.apache.arrow.vector.util.VectorAppender" - ) -} diff --git a/libs/arrow-spi/licenses/arrow-format-17.0.0.jar.sha1 b/libs/arrow-spi/licenses/arrow-format-17.0.0.jar.sha1 deleted file mode 100644 index 34fd4704eac91..0000000000000 --- a/libs/arrow-spi/licenses/arrow-format-17.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5d052f20fd1193840eb59818515e710156c364b2 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-memory-core-17.0.0.jar.sha1 b/libs/arrow-spi/licenses/arrow-memory-core-17.0.0.jar.sha1 deleted file mode 100644 index ea312f4f5e51a..0000000000000 --- a/libs/arrow-spi/licenses/arrow-memory-core-17.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51c5287ef5a624656bb38da7684078905b1a88c9 \ No newline at end of file diff --git 
a/libs/arrow-spi/licenses/arrow-memory-netty-17.0.0.jar.sha1 b/libs/arrow-spi/licenses/arrow-memory-netty-17.0.0.jar.sha1 deleted file mode 100644 index f77b3d836b77b..0000000000000 --- a/libs/arrow-spi/licenses/arrow-memory-netty-17.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de65a34dfeada4d47b161871fa39fa0a2ab4c39c \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-17.0.0.jar.sha1 b/libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-17.0.0.jar.sha1 deleted file mode 100644 index b21b4e8cc7d23..0000000000000 --- a/libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-17.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cdfdaa1bd5135bd869515fc205392ba92dcc1509 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-vector-17.0.0.jar.sha1 b/libs/arrow-spi/licenses/arrow-vector-17.0.0.jar.sha1 deleted file mode 100644 index 8f9fddc882396..0000000000000 --- a/libs/arrow-spi/licenses/arrow-vector-17.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16685545e4734382c1fcdaf12ac9b0a7d1fc06c0 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/jackson-databind-LICENSE.txt b/libs/arrow-spi/licenses/jackson-databind-LICENSE.txt deleted file mode 100644 index f5f45d26a49d6..0000000000000 --- a/libs/arrow-spi/licenses/jackson-databind-LICENSE.txt +++ /dev/null @@ -1,8 +0,0 @@ -This copy of Jackson JSON processor streaming parser/generator is licensed under the -Apache (Software) License, version 2.0 ("the License"). -See the License for details about distribution rights, and the -specific rights regarding derivate works. - -You may obtain a copy of the License at: - -http://www.apache.org/licenses/LICENSE-2.0 diff --git a/libs/arrow-spi/licenses/jackson-databind-NOTICE.txt b/libs/arrow-spi/licenses/jackson-databind-NOTICE.txt deleted file mode 100644 index 4c976b7b4cc58..0000000000000 --- a/libs/arrow-spi/licenses/jackson-databind-NOTICE.txt +++ /dev/null @@ -1,20 +0,0 @@ -# Jackson JSON processor - -Jackson is a high-performance, Free/Open Source JSON processing library. -It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has -been in development since 2007. -It is currently developed by a community of developers, as well as supported -commercially by FasterXML.com. - -## Licensing - -Jackson core and extension components may licensed under different licenses. -To find the details that apply to this artifact see the accompanying LICENSE file. -For more information, including possible other licensing options, contact -FasterXML.com (http://fasterxml.com). - -## Credits - -A list of contributors may be found from CREDITS file, which is included -in some artifacts (usually source distributions); but is always available -from the source code management (SCM) system project uses. diff --git a/libs/arrow-spi/licenses/netty-common-NOTICE.txt b/libs/arrow-spi/licenses/netty-common-NOTICE.txt deleted file mode 100644 index 971865b7c1c23..0000000000000 --- a/libs/arrow-spi/licenses/netty-common-NOTICE.txt +++ /dev/null @@ -1,264 +0,0 @@ - - The Netty Project - ================= - -Please visit the Netty web site for more information: - - * https://netty.io/ - -Copyright 2014 The Netty Project - -The Netty Project licenses this file to you under the Apache License, -version 2.0 (the "License"); you may not use this file except in compliance -with the License. 
You may obtain a copy of the License at: - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -License for the specific language governing permissions and limitations -under the License. - -Also, please refer to each LICENSE..txt file, which is located in -the 'license' directory of the distribution file, for the license terms of the -components that this product depends on. - -------------------------------------------------------------------------------- -This product contains the extensions to Java Collections Framework which has -been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: - - * LICENSE: - * license/LICENSE.jsr166y.txt (Public Domain) - * HOMEPAGE: - * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ - * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ - -This product contains a modified version of Robert Harder's Public Domain -Base64 Encoder and Decoder, which can be obtained at: - - * LICENSE: - * license/LICENSE.base64.txt (Public Domain) - * HOMEPAGE: - * http://iharder.sourceforge.net/current/java/base64/ - -This product contains a modified portion of 'Webbit', an event based -WebSocket and HTTP server, which can be obtained at: - - * LICENSE: - * license/LICENSE.webbit.txt (BSD License) - * HOMEPAGE: - * https://github.com/joewalnes/webbit - -This product contains a modified portion of 'SLF4J', a simple logging -facade for Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.slf4j.txt (MIT License) - * HOMEPAGE: - * https://www.slf4j.org/ - -This product contains a modified portion of 'Apache Harmony', an open source -Java SE, which can be obtained at: - - * NOTICE: - * license/NOTICE.harmony.txt - * LICENSE: - * license/LICENSE.harmony.txt (Apache License 2.0) - * HOMEPAGE: - * https://archive.apache.org/dist/harmony/ - -This product contains a modified portion of 'jbzip2', a Java bzip2 compression -and decompression library written by Matthew J. Francis. It can be obtained at: - - * LICENSE: - * license/LICENSE.jbzip2.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jbzip2/ - -This product contains a modified portion of 'libdivsufsort', a C API library to construct -the suffix array and the Burrows-Wheeler transformed string for any input string of -a constant-size alphabet written by Yuta Mori. It can be obtained at: - - * LICENSE: - * license/LICENSE.libdivsufsort.txt (MIT License) - * HOMEPAGE: - * https://github.com/y-256/libdivsufsort - -This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, - which can be obtained at: - - * LICENSE: - * license/LICENSE.jctools.txt (ASL2 License) - * HOMEPAGE: - * https://github.com/JCTools/JCTools - -This product optionally depends on 'JZlib', a re-implementation of zlib in -pure Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.jzlib.txt (BSD style License) - * HOMEPAGE: - * http://www.jcraft.com/jzlib/ - -This product optionally depends on 'Compress-LZF', a Java library for encoding and -decoding data in LZF format, written by Tatu Saloranta. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.compress-lzf.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/ning/compress - -This product optionally depends on 'lz4', a LZ4 Java compression -and decompression library written by Adrien Grand. It can be obtained at: - - * LICENSE: - * license/LICENSE.lz4.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jpountz/lz4-java - -This product optionally depends on 'lzma-java', a LZMA Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.lzma-java.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jponge/lzma-java - -This product optionally depends on 'zstd-jni', a zstd-jni Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.zstd-jni.txt (BSD) - * HOMEPAGE: - * https://github.com/luben/zstd-jni - -This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression -and decompression library written by William Kinney. It can be obtained at: - - * LICENSE: - * license/LICENSE.jfastlz.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jfastlz/ - -This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data -interchange format, which can be obtained at: - - * LICENSE: - * license/LICENSE.protobuf.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/protobuf - -This product optionally depends on 'Bouncy Castle Crypto APIs' to generate -a temporary self-signed X.509 certificate when the JVM does not provide the -equivalent functionality. It can be obtained at: - - * LICENSE: - * license/LICENSE.bouncycastle.txt (MIT License) - * HOMEPAGE: - * https://www.bouncycastle.org/ - -This product optionally depends on 'Snappy', a compression library produced -by Google Inc, which can be obtained at: - - * LICENSE: - * license/LICENSE.snappy.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/snappy - -This product optionally depends on 'JBoss Marshalling', an alternative Java -serialization API, which can be obtained at: - - * LICENSE: - * license/LICENSE.jboss-marshalling.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jboss-remoting/jboss-marshalling - -This product optionally depends on 'Caliper', Google's micro- -benchmarking framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.caliper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/google/caliper - -This product optionally depends on 'Apache Commons Logging', a logging -framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-logging.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/logging/ - -This product optionally depends on 'Apache Log4J', a logging framework, which -can be obtained at: - - * LICENSE: - * license/LICENSE.log4j.txt (Apache License 2.0) - * HOMEPAGE: - * https://logging.apache.org/log4j/ - -This product optionally depends on 'Aalto XML', an ultra-high performance -non-blocking XML processor, which can be obtained at: - - * LICENSE: - * license/LICENSE.aalto-xml.txt (Apache License 2.0) - * HOMEPAGE: - * https://wiki.fasterxml.com/AaltoHome - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Twitter. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.hpack.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/twitter/hpack - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: - - * LICENSE: - * license/LICENSE.hyper-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/python-hyper/hpack/ - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: - - * LICENSE: - * license/LICENSE.nghttp2-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/nghttp2/nghttp2/ - -This product contains a modified portion of 'Apache Commons Lang', a Java library -provides utilities for the java.lang API, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-lang.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/proper/commons-lang/ - - -This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. - - * LICENSE: - * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/takari/maven-wrapper - -This product contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. -This private header is also used by Apple's open source - mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). - - * LICENSE: - * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0) - * HOMEPAGE: - * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h - -This product optionally depends on 'Brotli4j', Brotli compression and -decompression for Java., which can be obtained at: - - * LICENSE: - * license/LICENSE.brotli4j.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/hyperxpro/Brotli4j diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamManager.java b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamManager.java index cdb83f032356a..3bee05f0110d1 100644 --- a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamManager.java +++ b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamManager.java @@ -34,7 +34,7 @@ public interface StreamManager extends AutoCloseable { * @return A StreamTicket that can be used to access the stream * @throws IllegalArgumentException if producer is null or parentTaskId is invalid */ - StreamTicket registerStream(StreamProducer producer, TaskId parentTaskId); + StreamTicket registerStream(StreamProducer producer, TaskId parentTaskId); /** * Creates a stream reader for consuming Arrow data using a valid ticket. @@ -46,7 +46,7 @@ public interface StreamManager extends AutoCloseable { * @throws IllegalArgumentException if the ticket is invalid * @throws IllegalStateException if the stream has been cancelled or closed */ - StreamReader getStreamReader(StreamTicket ticket); + StreamReader getStreamReader(StreamTicket ticket); /** * Gets the StreamTicketFactory instance associated with this StreamManager. 
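[Editor's note, not part of the patch] The hunks that follow decouple the `:libs:opensearch-arrow-spi` interfaces from Arrow by replacing the concrete `VectorSchemaRoot`/`BufferAllocator` references with type parameters, so that only the Flight plugin binds them to Arrow classes. A minimal, self-contained sketch of that pattern, using hypothetical names rather than code from this patch:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical mini-SPI illustrating the genericization pattern: the interface
    // declares type parameters instead of importing a specific vector/allocator library.
    interface Producer<VectorRoot, Allocator> extends Closeable {
        VectorRoot createRoot(Allocator allocator);
    }

    // A downstream module (analogous to the Flight plugin) picks the concrete types,
    // so the SPI module itself needs no compile-time dependency on them.
    class ListProducer implements Producer<List<String>, Integer> {
        @Override
        public List<String> createRoot(Integer initialCapacity) {
            return new ArrayList<>(initialCapacity);
        }

        @Override
        public void close() throws IOException {
            // nothing to release in this sketch
        }
    }

Under this pattern the SPI jar compiles with no vector library on its classpath, which is what allows the libs/arrow-spi build.gradle change above to drop the arrow-vector and arrow-memory dependencies.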
diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java index c5cd6f16adfdd..6ca5b8944319b 100644 --- a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java +++ b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java @@ -8,8 +8,6 @@ package org.opensearch.arrow.spi; -import org.apache.arrow.memory.BufferAllocator; -import org.apache.arrow.vector.VectorSchemaRoot; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.tasks.TaskId; @@ -77,7 +75,7 @@ * @see StreamReader */ @ExperimentalApi -public interface StreamProducer extends Closeable { +public interface StreamProducer<VectorRoot, Allocator> extends Closeable { /** * Creates a VectorSchemaRoot that defines the schema for this stream. This schema will be used @@ -86,7 +84,7 @@ public interface StreamProducer extends Closeable { * @param allocator The allocator to use for creating vectors * @return A new VectorSchemaRoot instance */ - VectorSchemaRoot createRoot(BufferAllocator allocator); + VectorRoot createRoot(Allocator allocator); /** * Creates a job that will produce the stream data in batches. The job will populate @@ -95,7 +93,7 @@ public interface StreamProducer extends Closeable { * @param allocator The allocator to use for any additional memory allocations * @return A new BatchedJob instance */ - BatchedJob createJob(BufferAllocator allocator); + BatchedJob<VectorRoot> createJob(Allocator allocator); /** * Provides an estimate of the total number of rows that will be produced. @@ -113,7 +111,7 @@ public interface StreamProducer extends Closeable { /** * BatchedJob interface for producing stream data in batches. */ - interface BatchedJob { + interface BatchedJob<VectorRoot> { /** * Executes the batch processing job. Implementations should populate the root with data @@ -122,7 +120,7 @@ interface BatchedJob { * @param root The VectorSchemaRoot to populate with data * @param flushSignal Signal to coordinate with consumers */ - void run(VectorSchemaRoot root, FlushSignal flushSignal); + void run(VectorRoot root, FlushSignal flushSignal); /** * Called to signal producer when the job is canceled. diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamReader.java b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamReader.java index b258652988b96..74ad3875238a9 100644 --- a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamReader.java +++ b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamReader.java @@ -8,7 +8,6 @@ package org.opensearch.arrow.spi; -import org.apache.arrow.vector.VectorSchemaRoot; import org.opensearch.common.annotation.ExperimentalApi; import java.io.Closeable; @@ -37,7 +36,7 @@ * @see StreamProducer */ @ExperimentalApi -public interface StreamReader extends Closeable { +public interface StreamReader<VectorRoot> extends Closeable { /** * Blocking request to load next batch into root.
@@ -52,5 +51,5 @@ public interface StreamReader extends Closeable { * * @return the VectorSchemaRoot */ - VectorSchemaRoot getRoot(); + VectorRoot getRoot(); } diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/package-info.java b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/package-info.java index d075ecaa764bb..14227d69da8b0 100644 --- a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/package-info.java +++ b/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/package-info.java @@ -7,6 +7,6 @@ */ /** - * Contains Apache Arrow related classes and Stream generic interfaces + * Contains Stream producer, consumer and manager generic interfaces */ package org.opensearch.arrow.spi; diff --git a/plugins/arrow-flight-rpc/build.gradle b/plugins/arrow-flight-rpc/build.gradle new file mode 100644 index 0000000000000..f3a166bc39ae7 --- /dev/null +++ b/plugins/arrow-flight-rpc/build.gradle @@ -0,0 +1,301 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +apply plugin: 'opensearch.internal-cluster-test' + +opensearchplugin { + description = 'Arrow flight based Stream implementation' + classname = 'org.opensearch.arrow.flight.bootstrap.FlightStreamPlugin' +} + +dependencies { + implementation project(':libs:opensearch-arrow-spi') + compileOnly 'org.checkerframework:checker-qual:3.44.0' + + implementation "org.apache.arrow:arrow-vector:${versions.arrow}" + implementation "org.apache.arrow:arrow-format:${versions.arrow}" + implementation "org.apache.arrow:flight-core:${versions.arrow}" + implementation "org.apache.arrow:arrow-memory-core:${versions.arrow}" + + runtimeOnly "org.apache.arrow:arrow-memory-netty:${versions.arrow}" + runtimeOnly "org.apache.arrow:arrow-memory-netty-buffer-patch:${versions.arrow}" + + implementation "io.netty:netty-buffer:${versions.netty}" + implementation "io.netty:netty-common:${versions.netty}" + + implementation "io.netty:netty-codec:${versions.netty}" + implementation "io.netty:netty-codec-http:${versions.netty}" + implementation "io.netty:netty-codec-http2:${versions.netty}" + implementation "io.netty:netty-handler:${versions.netty}" + implementation "io.netty:netty-resolver:${versions.netty}" + implementation "io.netty:netty-transport:${versions.netty}" + implementation "io.netty:netty-transport-native-unix-common:${versions.netty}" + implementation "io.netty:netty-transport-classes-epoll:${versions.netty}" + implementation "io.netty:netty-tcnative-classes:2.0.66.Final" + + implementation "org.slf4j:slf4j-api:${versions.slf4j}" + runtimeOnly "com.google.flatbuffers:flatbuffers-java:${versions.flatbuffers}" + runtimeOnly "commons-codec:commons-codec:${versions.commonscodec}" + + implementation "io.grpc:grpc-api:${versions.grpc}" + runtimeOnly "io.grpc:grpc-core:${versions.grpc}" + implementation "io.grpc:grpc-stub:${versions.grpc}" + implementation "io.grpc:grpc-netty:${versions.grpc}" + + runtimeOnly group: 'com.google.code.findbugs', name: 'jsr305', version: '3.0.2' + compileOnly 'org.immutables:value:2.10.1' + annotationProcessor 'org.immutables:value:2.10.1' + + runtimeOnly 'io.perfmark:perfmark-api:0.27.0' + runtimeOnly 'org.apache.parquet:parquet-arrow:1.13.1' + runtimeOnly "io.grpc:grpc-protobuf-lite:${versions.grpc}" + runtimeOnly "io.grpc:grpc-protobuf:${versions.grpc}" + 
implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + + runtimeOnly "com.google.guava:failureaccess:1.0.1" + compileOnly "com.google.errorprone:error_prone_annotations:2.31.0" + runtimeOnly('com.google.guava:guava:33.3.1-jre') { + attributes { + attribute(Attribute.of('org.gradle.jvm.environment', String), 'standard-jvm') + } + } +} + +tasks.named('test').configure { + jacoco { + excludes = ['org/apache/arrow/flight/**'] + } +} + +test { + systemProperty 'io.netty.allocator.numDirectArenas', '1' + systemProperty 'io.netty.noUnsafe', 'false' + systemProperty 'io.netty.tryUnsafe', 'true' + systemProperty 'io.netty.tryReflectionSetAccessible', 'true' +} + +internalClusterTest { + systemProperty 'io.netty.allocator.numDirectArenas', '1' + systemProperty 'io.netty.noUnsafe', 'false' + systemProperty 'io.netty.tryUnsafe', 'true' + systemProperty 'io.netty.tryReflectionSetAccessible', 'true' +} + +spotless { + java { + // Files to exclude from formatting + targetExclude 'src/main/java/org/apache/arrow/flight/**/*.java' + } +} + + +tasks.named("dependencyLicenses").configure { + mapping from: /netty-.*/, to: 'netty' + mapping from: /grpc-.*/, to: 'grpc' + mapping from: /jackson-.*/, to: 'jackson' +} + +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' + + excludes = [ + 'org/apache/arrow/flight/OSFlightServer$Builder.class', + 'org/apache/arrow/flight/OSFlightClient$Builder.class', + 'org/opensearch/flight/bootstrap/server/ServerConfig$Netty4Configs.class', + 'org/opensearch/flight/bootstrap/server/ServerConfig.class', + 'org/opensearch/flight/bootstrap/tls/DefaultSslContextProvider.class', + 'org/apache/arrow/flight/OpenSearchFlightClient$Builder.class' + ] +} + + +tasks.named('thirdPartyAudit').configure { + ignoreMissingClasses( + 'com.google.gson.stream.JsonReader', + 'com.google.gson.stream.JsonToken', + 'org.apache.parquet.schema.GroupType', + 'com.google.rpc.Status', + 'com.google.rpc.Status$Builder', + // Parquet Schema classes + 'org.apache.parquet.schema.LogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$DateLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$DecimalLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$IntLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$IntervalLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$ListLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$LogicalTypeAnnotationVisitor', + 'org.apache.parquet.schema.LogicalTypeAnnotation$StringLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$TimeLogicalTypeAnnotation', + 'org.apache.parquet.schema.LogicalTypeAnnotation$TimeUnit', + 'org.apache.parquet.schema.LogicalTypeAnnotation$TimestampLogicalTypeAnnotation', + 'org.apache.parquet.schema.MessageType', + 'org.apache.parquet.schema.OriginalType', + 'org.apache.parquet.schema.PrimitiveType', + 'org.apache.parquet.schema.PrimitiveType$PrimitiveTypeName', + 'org.apache.parquet.schema.PrimitiveType$PrimitiveTypeNameConverter', + 'org.apache.parquet.schema.Type', + 'org.apache.parquet.schema.Type$Repetition', + 'org.apache.parquet.schema.Types', + 'org.apache.parquet.schema.Types$BaseListBuilder', + 'org.apache.parquet.schema.Types$GroupBuilder', + 'org.apache.parquet.schema.Types$ListBuilder', + 'org.apache.parquet.schema.Types$PrimitiveBuilder', + + 
'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // classes are missing + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration', + + 'com.google.protobuf.util.Timestamps' + ) + ignoreViolations( + // Guava internal classes + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 
'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5', + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.PlatformDependent0$4', + 'io.netty.util.internal.PlatformDependent0$6', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.PlatformDependent0$4', + 'io.netty.util.internal.PlatformDependent0$6', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 
'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'org.apache.arrow.memory.util.MemoryUtil', + 'org.apache.arrow.memory.util.MemoryUtil$1' + + ) +} diff --git a/plugins/arrow-flight-rpc/licenses/arrow-format-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/arrow-format-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..6372bcd89eefd --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/arrow-format-18.1.0.jar.sha1 @@ -0,0 +1 @@ +9d356b6f20620f5619ff85b174f97ae507df4997 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-format-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/arrow-format-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-format-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-format-LICENSE.txt diff --git a/libs/arrow-spi/licenses/arrow-format-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/arrow-format-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-format-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-format-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/arrow-memory-core-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/arrow-memory-core-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..1a4da42973bfe --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/arrow-memory-core-18.1.0.jar.sha1 @@ -0,0 +1 @@ +35f4853d512f06759759b40b53bac850867886f8 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-memory-core-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-core-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-core-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-memory-core-LICENSE.txt diff --git a/libs/arrow-spi/licenses/arrow-memory-core-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-core-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-core-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-memory-core-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..291d435138e30 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-18.1.0.jar.sha1 @@ -0,0 +1 @@ +9e9e08d0b548d2c02c632e5daaf176e588810d22 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-memory-netty-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-netty-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-memory-netty-LICENSE.txt diff --git a/libs/arrow-spi/licenses/arrow-memory-netty-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-netty-NOTICE.txt rename to 
plugins/arrow-flight-rpc/licenses/arrow-memory-netty-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..40c7b2992d715 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-18.1.0.jar.sha1 @@ -0,0 +1 @@ +86c8fbdb6ab220603ea3a215f48a7f793ac6a08d \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-LICENSE.txt diff --git a/libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-memory-netty-buffer-patch-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-memory-netty-buffer-patch-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/arrow-vector-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/arrow-vector-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..d526f82b6f06e --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/arrow-vector-18.1.0.jar.sha1 @@ -0,0 +1 @@ +b1fb77f4ef36fd52afe480ba12b7da77367eb88c \ No newline at end of file diff --git a/libs/arrow-spi/licenses/arrow-vector-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/arrow-vector-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-vector-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-vector-LICENSE.txt diff --git a/libs/arrow-spi/licenses/arrow-vector-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/arrow-vector-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/arrow-vector-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/arrow-vector-NOTICE.txt diff --git a/libs/arrow-spi/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/arrow-flight-rpc/licenses/commons-codec-1.16.1.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/commons-codec-1.16.1.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/commons-codec-1.16.1.jar.sha1 diff --git a/libs/arrow-spi/licenses/commons-codec-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/commons-codec-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/commons-codec-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/commons-codec-LICENSE.txt diff --git a/libs/arrow-spi/licenses/commons-codec-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/commons-codec-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/commons-codec-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/commons-codec-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/failureaccess-1.0.1.jar.sha1 b/plugins/arrow-flight-rpc/licenses/failureaccess-1.0.1.jar.sha1 new file mode 100644 index 0000000000000..4798b37e20691 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/failureaccess-1.0.1.jar.sha1 @@ -0,0 +1 @@ +1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/failureaccess-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/failureaccess-LICENSE.txt new file mode 100644 index 0000000000000..7a4a3ea2424c0 --- /dev/null +++ 
b/plugins/arrow-flight-rpc/licenses/failureaccess-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/libs/arrow-spi/licenses/flatbuffers-java-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/failureaccess-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/flatbuffers-java-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/failureaccess-NOTICE.txt diff --git a/libs/arrow-spi/licenses/flatbuffers-java-2.0.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/flatbuffers-java-2.0.0.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/flatbuffers-java-2.0.0.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/flatbuffers-java-2.0.0.jar.sha1 diff --git a/libs/arrow-spi/licenses/flatbuffers-java-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/flatbuffers-java-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/flatbuffers-java-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/flatbuffers-java-LICENSE.txt diff --git a/libs/arrow-spi/licenses/slf4j-api-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/flatbuffers-java-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/slf4j-api-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/flatbuffers-java-NOTICE.txt diff --git a/plugins/arrow-flight-rpc/licenses/flight-core-18.1.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/flight-core-18.1.0.jar.sha1 new file mode 100644 index 0000000000000..fc2e34539cf04 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/flight-core-18.1.0.jar.sha1 @@ -0,0 +1 @@ +82494895fcb0656967680442f63ce1214e532d52 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/flight-core-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/flight-core-LICENSE.txt new file mode 100644 index 0000000000000..7bb1330a1002b --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/flight-core-LICENSE.txt @@ -0,0 +1,2261 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +src/arrow/util (some portions): Apache 2.0, and 3-clause BSD + +Some portions of this module are derived from code in the Chromium project, +copyright (c) Google inc and (c) The Chromium Authors and licensed under the +Apache 2.0 License or the under the 3-clause BSD license: + + Copyright (c) 2013 The Chromium Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. 
nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from Daniel Lemire's FrameOfReference project. + +https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp +https://github.com/lemire/FrameOfReference/blob/146948b6058a976bc7767262ad3a2ce201486b93/scripts/turbopacking64.py + +Copyright: 2013 Daniel Lemire +Home page: http://lemire.me/en/ +Project page: https://github.com/lemire/FrameOfReference +License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from the TensorFlow project + +Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the NumPy project. + +https://github.com/numpy/numpy/blob/e1f191c46f2eebd6cb892a4bfe14d9dd43a06c4e/numpy/core/src/multiarray/multiarraymodule.c#L2910 + +https://github.com/numpy/numpy/blob/68fd82271b9ea5a9e50d4e761061dfcca851382a/numpy/core/src/multiarray/datetime.c + +Copyright (c) 2005-2017, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from the Boost project + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from the FlatBuffers project + +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the tslib project + +Copyright 2015 Microsoft Corporation. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the jemalloc project + +https://github.com/jemalloc/jemalloc + +Copyright (C) 2002-2017 Jason Evans . +All rights reserved. +Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. +Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice(s), + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice(s), + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- + +This project includes code from the Go project, BSD 3-clause license + PATENTS +weak patent termination clause +(https://github.com/golang/go/blob/master/PATENTS). + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from the hs2client + +https://github.com/cloudera/hs2client + +Copyright 2016 Cloudera Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +The script ci/scripts/util_wait_for_it.sh has the following license + +Copyright (c) 2016 Giles Hall + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The script r/configure has the following license (MIT) + +Copyright (c) 2017, Jeroen Ooms and Jim Hester + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +cpp/src/arrow/util/logging.cc, cpp/src/arrow/util/logging.h and +cpp/src/arrow/util/logging-test.cc are adapted from +Ray Project (https://github.com/ray-project/ray) (Apache 2.0). + +Copyright (c) 2016 Ray Project (https://github.com/ray-project/ray) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- +The files cpp/src/arrow/vendored/datetime/date.h, cpp/src/arrow/vendored/datetime/tz.h, +cpp/src/arrow/vendored/datetime/tz_private.h, cpp/src/arrow/vendored/datetime/ios.h, +cpp/src/arrow/vendored/datetime/ios.mm, +cpp/src/arrow/vendored/datetime/tz.cpp are adapted from +Howard Hinnant's date library (https://github.com/HowardHinnant/date) +It is licensed under MIT license. + +The MIT License (MIT) +Copyright (c) 2015, 2016, 2017 Howard Hinnant +Copyright (c) 2016 Adrian Colomitchi +Copyright (c) 2017 Florian Dang +Copyright (c) 2017 Paul Thompson +Copyright (c) 2018 Tomasz Kamiński + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/util/utf8.h includes code adapted from the page + https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ +with the following license (MIT) + +Copyright (c) 2008-2009 Bjoern Hoehrmann + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/xxhash/ have the following license +(BSD 2-Clause License) + +xxHash Library +Copyright (c) 2012-2014, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +You can contact the author at : +- xxHash homepage: http://www.xxhash.com +- xxHash source repository : https://github.com/Cyan4973/xxHash + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/double-conversion/ have the following license +(BSD 3-Clause License) + +Copyright 2006-2011, the V8 project authors. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/uriparser/ have the following license +(BSD 3-Clause License) + +uriparser - RFC 3986 URI parsing library + +Copyright (C) 2007, Weijia Song +Copyright (C) 2007, Sebastian Pipping +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above + copyright notice, this list of conditions and the following + disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + + * Neither the name of the nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files under dev/tasks/conda-recipes have the following license + +BSD 3-clause license +Copyright (c) 2015-2018, conda-forge +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/utfcpp/ have the following license + +Copyright 2006-2018 Nemanja Trifunovic + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from Apache Kudu. + + * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake + +Copyright: 2016 The Apache Software Foundation. +Home page: https://kudu.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Impala (incubating), formerly +Impala. The Impala code and rights were donated to the ASF as part of the +Incubator process after the initial code imports into Apache Parquet. + +Copyright: 2012 Cloudera, Inc. +Copyright: 2016 The Apache Software Foundation. 
+Home page: http://impala.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Aurora. + +* dev/release/{release,changelog,release-candidate} are based on the scripts from + Apache Aurora + +Copyright: 2016 The Apache Software Foundation. +Home page: https://aurora.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from the Google styleguide. + +* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide. + +Copyright: 2009 Google Inc. All rights reserved. +Homepage: https://github.com/google/styleguide +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from Snappy. + +* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code + from Google's Snappy project. + +Copyright: 2009 Google Inc. All rights reserved. +Homepage: https://github.com/google/snappy +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from the manylinux project. + +* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py, + requirements.txt} are based on code from the manylinux project. + +Copyright: 2016 manylinux +Homepage: https://github.com/pypa/manylinux +License: The MIT License (MIT) + +-------------------------------------------------------------------------------- + +This project includes code from the cymove project: + +* python/pyarrow/includes/common.pxd includes code from the cymove project + +The MIT License (MIT) +Copyright (c) 2019 Omer Ozarslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The projects includes code from the Ursabot project under the dev/archery +directory. + +License: BSD 2-Clause + +Copyright 2019 RStudio, Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project include code from mingw-w64. + +* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5 + +Copyright (c) 2009 - 2013 by the mingw-w64 project +Homepage: https://mingw-w64.org +License: Zope Public License (ZPL) Version 2.1. + +--------------------------------------------------------------------------------- + +This project include code from Google's Asylo project. + +* cpp/src/arrow/result.h is based on status_or.h + +Copyright (c) Copyright 2017 Asylo authors +Homepage: https://asylo.dev/ +License: Apache 2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Google's protobuf project + +* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based off ASSIGN_OR_RETURN +* cpp/src/arrow/util/bit_stream_utils.h contains code from wire_format_lite.h + +Copyright 2008 Google Inc. All rights reserved. +Homepage: https://developers.google.com/protocol-buffers/ +License: + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. + +-------------------------------------------------------------------------------- + +3rdparty dependency LLVM is statically linked in certain binary distributions. +Additionally some sections of source code have been derived from sources in LLVM +and have been clearly labeled as such. LLVM has the following license: + +============================================================================== +The LLVM Project is under the Apache License v2.0 with LLVM Exceptions: +============================================================================== + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +---- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. + +============================================================================== +Software from third parties included in the LLVM Project: +============================================================================== +The LLVM Project contains third party software which is under different license +terms. All such code will be identified clearly using at least one of two +mechanisms: +1) It will be in a separate directory tree with its own `LICENSE.txt` or + `LICENSE` file at the top containing the specific license and restrictions + which apply to that software, or +2) It will contain specific license and restriction terms at the top of every + file. + +-------------------------------------------------------------------------------- + +3rdparty dependency gRPC is statically linked in certain binary +distributions, like the python wheels. gRPC has the following license: + +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +-------------------------------------------------------------------------------- + +3rdparty dependency Apache Thrift is statically linked in certain binary +distributions, like the python wheels. Apache Thrift has the following license: + +Apache Thrift +Copyright (C) 2006 - 2019, The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency Apache ORC is statically linked in certain binary +distributions, like the python wheels. Apache ORC has the following license: + +Apache ORC +Copyright 2013-2019 The Apache Software Foundation + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by Hewlett-Packard: +(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency zstd is statically linked in certain binary +distributions, like the python wheels. ZSTD has the following license: + +BSD License + +For Zstandard software + +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency lz4 is statically linked in certain binary +distributions, like the python wheels. lz4 has the following license: + +LZ4 Library +Copyright (c) 2011-2016, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency Brotli is statically linked in certain binary +distributions, like the python wheels. Brotli has the following license: + +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- + +3rdparty dependency rapidjson is statically linked in certain binary +distributions, like the python wheels. rapidjson and its dependencies have the +following licenses: + +Tencent is pleased to support the open source community by making RapidJSON +available. + +Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. +All rights reserved. + +If you have downloaded a copy of the RapidJSON binary from Tencent, please note +that the RapidJSON binary is licensed under the MIT License. +If you have downloaded a copy of the RapidJSON source code from Tencent, please +note that RapidJSON source code is licensed under the MIT License, except for +the third-party components listed below which are subject to different license +terms. Your integration of RapidJSON into your own projects may require +compliance with the MIT License, as well as the other licenses applicable to +the third-party components included within RapidJSON. To avoid the problematic +JSON license in your own projects, it's sufficient to exclude the +bin/jsonchecker/ directory, as it's the only code under the JSON license. +A copy of the MIT License is included in this file. + +Other dependencies and licenses: + + Open Source Software Licensed Under the BSD License: + -------------------------------------------------------------------- + + The msinttypes r29 + Copyright (c) 2006-2013 Alexander Chemeris + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + DAMAGE. 
+ + Terms of the MIT License: + -------------------------------------------------------------------- + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency snappy is statically linked in certain binary +distributions, like the python wheels. snappy has the following license: + +Copyright 2011, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Google Inc. nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=== + +Some of the benchmark data in testdata/ is licensed differently: + + - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and + is licensed under the Creative Commons Attribution 3.0 license + (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ + for more information. + + - kppkn.gtb is taken from the Gaviota chess tablebase set, and + is licensed under the MIT License. See + https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 + for more information. 
+ + - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper + “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA + Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro, + which is licensed under the CC-BY license. See + http://www.ploscompbiol.org/static/license for more ifnormation. + + - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project + Gutenberg. The first three have expired copyrights and are in the public + domain; the latter does not have expired copyright, but is still in the + public domain according to the license information + (http://www.gutenberg.org/ebooks/53). + +-------------------------------------------------------------------------------- + +3rdparty dependency gflags is statically linked in certain binary +distributions, like the python wheels. gflags has the following license: + +Copyright (c) 2006, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency glog is statically linked in certain binary +distributions, like the python wheels. glog has the following license: + +Copyright (c) 2008, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +A function gettimeofday in utilities.cc is based on + +http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd + +The license of this code is: + +Copyright (c) 2003-2008, Jouni Malinen and contributors +All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name(s) of the above-listed copyright holder(s) nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency re2 is statically linked in certain binary +distributions, like the python wheels. re2 has the following license: + +Copyright (c) 2009 The RE2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency c-ares is statically linked in certain binary +distributions, like the python wheels. c-ares has the following license: + +# c-ares license + +Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS +file. + +Copyright 1998 by the Massachusetts Institute of Technology. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, provided that +the above copyright notice appear in all copies and that both that copyright +notice and this permission notice appear in supporting documentation, and that +the name of M.I.T. not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior permission. +M.I.T. makes no representations about the suitability of this software for any +purpose. It is provided "as is" without express or implied warranty. + +-------------------------------------------------------------------------------- + +3rdparty dependency zlib is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. In the future +this will likely change to static linkage. zlib has the following license: + +zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.11, January 15th, 2017 + + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +-------------------------------------------------------------------------------- + +3rdparty dependency openssl is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. openssl +preceding version 3 has the following license: + + LICENSE ISSUES + ============== + + The OpenSSL toolkit stays under a double license, i.e. 
both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). 
+ * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +-------------------------------------------------------------------------------- + +This project includes code from the rtools-backports project. + +* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code + from the rtools-backports project. + +Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms. +All rights reserved. +Homepage: https://github.com/r-windows/rtools-backports +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +Some code from pandas has been adapted for the pyarrow codebase. pandas is +available under the 3-clause BSD license, which follows: + +pandas license +============== + +Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team +All rights reserved. + +Copyright (c) 2008-2011 AQR Capital Management, LLC +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the copyright holder nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +Some bits from DyND, in particular aspects of the build system, have been +adapted from libdynd and dynd-python under the terms of the BSD 2-clause +license + +The BSD 2-Clause License + + Copyright (C) 2011-12, Dynamic NDArray Developers + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Dynamic NDArray Developers list: + + * Mark Wiebe + * Continuum Analytics + +-------------------------------------------------------------------------------- + +Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted +for PyArrow. Ibis is released under the Apache License, Version 2.0. 
+ +-------------------------------------------------------------------------------- + +dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: + +BSD 2-Clause License + +Copyright (c) 2009-present, Homebrew contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- + +cpp/src/arrow/vendored/base64.cpp has the following license + +ZLIB License + +Copyright (C) 2004-2017 René Nyffenegger + +This source code is provided 'as-is', without any express or implied +warranty. In no event will the author be held liable for any damages arising +from the use of this software. + +Permission is granted to anyone to use this software for any purpose, including +commercial applications, and to alter it and redistribute it freely, subject to +the following restrictions: + +1. The origin of this source code must not be misrepresented; you must not + claim that you wrote the original source code. If you use this source code + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original source code. + +3. This notice may not be removed or altered from any source distribution. + +René Nyffenegger rene.nyffenegger@adp-gmbh.ch + +-------------------------------------------------------------------------------- + +This project includes code from Folly. + + * cpp/src/arrow/vendored/ProducerConsumerQueue.h + +is based on Folly's + + * folly/Portability.h + * folly/lang/Align.h + * folly/ProducerConsumerQueue.h + +Copyright: Copyright (c) Facebook, Inc. and its affiliates. +Home page: https://github.com/facebook/folly +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/vendored/musl/strptime.c has the following license + +Copyright © 2005-2020 Rich Felker, et al. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The file cpp/cmake_modules/BuildUtils.cmake contains code from + +https://gist.github.com/cristianadam/ef920342939a89fae3e8a85ca9459b49 + +which is made available under the MIT license + +Copyright (c) 2019 Cristian Adam + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/portable-snippets/ contain code from + +https://github.com/nemequ/portable-snippets + +and have the following copyright notice: + +Each source file contains a preamble explaining the license situation +for that file, which takes priority over this file. With the +exception of some code pulled in from other repositories (such as +µnit, an MIT-licensed project which is used for testing), the code is +public domain, released using the CC0 1.0 Universal dedication (*). + +(*) https://creativecommons.org/publicdomain/zero/1.0/legalcode + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/fast_float/ contain code from + +https://github.com/lemire/fast_float + +which is made available under the Apache License 2.0. + +-------------------------------------------------------------------------------- + +The file python/pyarrow/vendored/docscrape.py contains code from + +https://github.com/numpy/numpydoc/ + +which is made available under the BSD 2-clause license. 
+ +-------------------------------------------------------------------------------- + +The file python/pyarrow/vendored/version.py contains code from + +https://github.com/pypa/packaging/ + +which is made available under both the Apache license v2.0 and the +BSD 2-clause license. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/pcg contain code from + +https://github.com/imneme/pcg-cpp + +and have the following copyright notice: + +Copyright 2014-2019 Melissa O'Neill , + and the PCG Project contributors. + +SPDX-License-Identifier: (Apache-2.0 OR MIT) + +Licensed under the Apache License, Version 2.0 (provided in +LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0) +or under the MIT license (provided in LICENSE-MIT.txt and at +http://opensource.org/licenses/MIT), at your option. This file may not +be copied, modified, or distributed except according to those terms. + +Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either +express or implied. See your chosen license for details. + +-------------------------------------------------------------------------------- +r/R/dplyr-count-tally.R (some portions) + +Some portions of this file are derived from code from + +https://github.com/tidyverse/dplyr/ + +which is made available under the MIT license + +Copyright (c) 2013-2019 RStudio and others. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The file src/arrow/util/io_util.cc contains code from the CPython project +which is made available under the Python Software Foundation License Version 2. + +-------------------------------------------------------------------------------- + +3rdparty dependency opentelemetry-cpp is statically linked in certain binary +distributions. opentelemetry-cpp is made available under the Apache License 2.0. + +Copyright The OpenTelemetry Authors +SPDX-License-Identifier: Apache-2.0 + +-------------------------------------------------------------------------------- + +ci/conan/ is based on code from Conan Package and Dependency Manager. 
+ +Copyright (c) 2019 Conan.io + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency UCX is redistributed as a dynamically linked shared +library in certain binary distributions. UCX has the following license: + +Copyright (c) 2014-2015 UT-Battelle, LLC. All rights reserved. +Copyright (C) 2014-2020 Mellanox Technologies Ltd. All rights reserved. +Copyright (C) 2014-2015 The University of Houston System. All rights reserved. +Copyright (C) 2015 The University of Tennessee and The University + of Tennessee Research Foundation. All rights reserved. +Copyright (C) 2016-2020 ARM Ltd. All rights reserved. +Copyright (c) 2016 Los Alamos National Security, LLC. All rights reserved. +Copyright (C) 2016-2020 Advanced Micro Devices, Inc. All rights reserved. +Copyright (C) 2019 UChicago Argonne, LLC. All rights reserved. +Copyright (c) 2018-2020 NVIDIA CORPORATION. All rights reserved. +Copyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved. +Copyright (C) 2016-2020 Stony Brook University. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The file dev/tasks/r/github.packages.yml contains code from + +https://github.com/ursa-labs/arrow-r-nightly + +which is made available under the Apache License 2.0. + +-------------------------------------------------------------------------------- +.github/actions/sync-nightlies/action.yml (some portions) + +Some portions of this file are derived from code from + +https://github.com/JoshPiper/rsync-docker + +which is made available under the MIT license + +Copyright (c) 2020 Joshua Piper + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +.github/actions/sync-nightlies/action.yml (some portions) + +Some portions of this file are derived from code from + +https://github.com/burnett01/rsync-deployments + +which is made available under the MIT license + +Copyright (c) 2019-2022 Contention +Copyright (c) 2019-2022 Burnett01 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +java/vector/src/main/java/org/apache/arrow/vector/util/IntObjectHashMap.java +java/vector/src/main/java/org/apache/arrow/vector/util/IntObjectMap.java + +These file are derived from code from Netty, which is made available under the +Apache License 2.0. diff --git a/plugins/arrow-flight-rpc/licenses/flight-core-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/flight-core-NOTICE.txt new file mode 100644 index 0000000000000..2089c6fb20358 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/flight-core-NOTICE.txt @@ -0,0 +1,84 @@ +Apache Arrow +Copyright 2016-2024 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This product includes software from the SFrame project (BSD, 3-clause). +* Copyright (C) 2015 Dato, Inc. +* Copyright (c) 2009 Carnegie Mellon University. + +This product includes software from the Feather project (Apache 2.0) +https://github.com/wesm/feather + +This product includes software from the DyND project (BSD 2-clause) +https://github.com/libdynd + +This product includes software from the LLVM project + * distributed under the University of Illinois Open Source + +This product includes software from the google-lint project + * Copyright (c) 2009 Google Inc. All rights reserved. + +This product includes software from the mman-win32 project + * Copyright https://code.google.com/p/mman-win32/ + * Licensed under the MIT License; + +This product includes software from the LevelDB project + * Copyright (c) 2011 The LevelDB Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * Moved from Kudu http://github.com/cloudera/kudu + +This product includes software from the CMake project + * Copyright 2001-2009 Kitware, Inc. + * Copyright 2012-2014 Continuum Analytics, Inc. + * All rights reserved. + +This product includes software from https://github.com/matthew-brett/multibuild (BSD 2-clause) + * Copyright (c) 2013-2016, Matt Terry and Matthew Brett; all rights reserved. + +This product includes software from the Ibis project (Apache 2.0) + * Copyright (c) 2015 Cloudera, Inc. + * https://github.com/cloudera/ibis + +This product includes software from Dremio (Apache 2.0) + * Copyright (C) 2017-2018 Dremio Corporation + * https://github.com/dremio/dremio-oss + +This product includes software from Google Guava (Apache 2.0) + * Copyright (C) 2007 The Guava Authors + * https://github.com/google/guava + +This product include software from CMake (BSD 3-Clause) + * CMake - Cross Platform Makefile Generator + * Copyright 2000-2019 Kitware, Inc. and Contributors + +The web site includes files generated by Jekyll. + +-------------------------------------------------------------------------------- + +This product includes code from Apache Kudu, which includes the following in +its NOTICE file: + + Apache Kudu + Copyright 2016 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + + Portions of this software were developed at + Cloudera, Inc (http://www.cloudera.com/). 
+ +-------------------------------------------------------------------------------- + +This product includes code from Apache ORC, which includes the following in +its NOTICE file: + + Apache ORC + Copyright 2013-2019 The Apache Software Foundation + + This product includes software developed by The Apache Software + Foundation (http://www.apache.org/). + + This product includes software developed by Hewlett-Packard: + (c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P diff --git a/libs/arrow-spi/licenses/netty-common-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/grpc-LICENSE.txt similarity index 99% rename from libs/arrow-spi/licenses/netty-common-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/grpc-LICENSE.txt index 62589edd12a37..d645695673349 100644 --- a/libs/arrow-spi/licenses/netty-common-LICENSE.txt +++ b/plugins/arrow-flight-rpc/licenses/grpc-LICENSE.txt @@ -1,7 +1,7 @@ Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -193,7 +193,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/plugins/arrow-flight-rpc/licenses/grpc-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/grpc-NOTICE.txt new file mode 100644 index 0000000000000..f70c5620cf75a --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-NOTICE.txt @@ -0,0 +1,62 @@ +Copyright 2014 The gRPC Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +----------------------------------------------------------------------- + +This product contains a modified portion of 'OkHttp', an open source +HTTP & SPDY client for Android and Java applications, which can be obtained +at: + + * LICENSE: + * okhttp/third_party/okhttp/LICENSE (Apache License 2.0) + * HOMEPAGE: + * https://github.com/square/okhttp + * LOCATION_IN_GRPC: + * okhttp/third_party/okhttp + +This product contains a modified portion of 'Envoy', an open source +cloud-native high-performance edge/middle/service proxy, which can be +obtained at: + + * LICENSE: + * xds/third_party/envoy/LICENSE (Apache License 2.0) + * NOTICE: + * xds/third_party/envoy/NOTICE + * HOMEPAGE: + * https://www.envoyproxy.io + * LOCATION_IN_GRPC: + * xds/third_party/envoy + +This product contains a modified portion of 'protoc-gen-validate (PGV)', +an open source protoc plugin to generate polyglot message validators, +which can be obtained at: + + * LICENSE: + * xds/third_party/protoc-gen-validate/LICENSE (Apache License 2.0) + * NOTICE: + * xds/third_party/protoc-gen-validate/NOTICE + * HOMEPAGE: + * https://github.com/envoyproxy/protoc-gen-validate + * LOCATION_IN_GRPC: + * xds/third_party/protoc-gen-validate + +This product contains a modified portion of 'udpa', +an open source universal data plane API, which can be obtained at: + + * LICENSE: + * xds/third_party/udpa/LICENSE (Apache License 2.0) + * HOMEPAGE: + * https://github.com/cncf/udpa + * LOCATION_IN_GRPC: + * xds/third_party/udpa diff --git a/plugins/arrow-flight-rpc/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/grpc-core-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-core-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..e20345d29e914 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-core-1.68.2.jar.sha1 @@ -0,0 +1 @@ +b0fd51a1c029785d1c9ae2cfc80a296b60dfcfdb \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/grpc-netty-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-netty-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..36be00ed13330 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-netty-1.68.2.jar.sha1 @@ -0,0 +1 @@ +3c3279d2e3520195fd26e0c3d9aca2ed1157d8c3 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/grpc-protobuf-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-protobuf-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..e861b41837f33 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-protobuf-1.68.2.jar.sha1 @@ -0,0 +1 @@ +35b28e0d57874021cd31e76dd4a795f76a82471e \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..b2401f9752829 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a53064b896adcfefe74362a33e111492351dfc03 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/grpc-stub-1.68.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/grpc-stub-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..118464f8f48ff --- /dev/null +++ 
b/plugins/arrow-flight-rpc/licenses/grpc-stub-1.68.2.jar.sha1 @@ -0,0 +1 @@ +d58ee1cf723b4b5536d44b67e328c163580a8d98 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/guava-33.3.1-jre.jar.sha1 b/plugins/arrow-flight-rpc/licenses/guava-33.3.1-jre.jar.sha1 new file mode 100644 index 0000000000000..ce59350c0d430 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/guava-33.3.1-jre.jar.sha1 @@ -0,0 +1 @@ +852f8b363da0111e819460021ca693cacca3e8db \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/guava-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/arrow-flight-rpc/licenses/guava-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/libs/arrow-spi/licenses/jackson-annotations-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/jackson-LICENSE similarity index 100% rename from libs/arrow-spi/licenses/jackson-annotations-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/jackson-LICENSE diff --git a/libs/arrow-spi/licenses/jackson-annotations-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/jackson-NOTICE similarity index 100% rename from libs/arrow-spi/licenses/jackson-annotations-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/jackson-NOTICE diff --git a/libs/arrow-spi/licenses/jackson-annotations-2.18.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/jackson-annotations-2.18.2.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/jackson-annotations-2.18.2.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/jackson-annotations-2.18.2.jar.sha1 diff --git a/libs/arrow-spi/licenses/jackson-databind-2.18.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/jackson-databind-2.18.2.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/jackson-databind-2.18.2.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/jackson-databind-2.18.2.jar.sha1 diff --git a/plugins/arrow-flight-rpc/licenses/jsr305-3.0.2.jar.sha1 b/plugins/arrow-flight-rpc/licenses/jsr305-3.0.2.jar.sha1 new file mode 100644 index 0000000000000..c5c92d87b9d6c --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/jsr305-3.0.2.jar.sha1 @@ -0,0 +1 @@ +25ea2e8b0c338a877313bd4672d3fe056ea78f0d \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/jsr305-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/jsr305-LICENSE.txt new file mode 100644 index 0000000000000..0cb8710c4b3e5 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/jsr305-LICENSE.txt @@ -0,0 +1,29 @@ +Copyright (c) 2007-2009, JSR305 expert group +All rights reserved. + +http://www.opensource.org/licenses/bsd-license.php + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the JSR305 expert group nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + diff --git a/plugins/arrow-flight-rpc/licenses/jsr305-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/jsr305-NOTICE.txt new file mode 100644 index 0000000000000..8d1c8b69c3fce --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/jsr305-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/libs/arrow-spi/licenses/netty-buffer-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/netty-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/netty-buffer-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/netty-LICENSE.txt diff --git a/libs/arrow-spi/licenses/netty-buffer-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/netty-NOTICE.txt similarity index 100% rename from libs/arrow-spi/licenses/netty-buffer-NOTICE.txt rename to plugins/arrow-flight-rpc/licenses/netty-NOTICE.txt diff --git a/libs/arrow-spi/licenses/netty-buffer-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-buffer-4.1.118.Final.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/netty-buffer-4.1.118.Final.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/netty-buffer-4.1.118.Final.jar.sha1 diff --git a/plugins/arrow-flight-rpc/licenses/netty-codec-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-codec-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..7964f25f0372a --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-codec-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +307f665c08ce57333121de4f460479fc0c3c94d4 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-codec-http-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-codec-http-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..7cb43dd276c8a --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-codec-http-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +eda08a71294afe78c779b85fd696bc13491507a8 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..fab58dee2dfbf --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +e3c35c0685ec9e84c4f84b79feea7c9d185a08d3 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/netty-common-4.1.118.Final.jar.sha1 
b/plugins/arrow-flight-rpc/licenses/netty-common-4.1.118.Final.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/netty-common-4.1.118.Final.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/netty-common-4.1.118.Final.jar.sha1 diff --git a/plugins/arrow-flight-rpc/licenses/netty-handler-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-handler-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..d6eea2494813e --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-handler-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +30ebb05b6b0fb071dbfcf713017c4a767a97bb9b \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-resolver-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-resolver-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..19fbdbbb19b04 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-resolver-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +28c378c19c1779eca1104b400452627f3ebc4aea \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-tcnative-classes-2.0.66.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-tcnative-classes-2.0.66.Final.jar.sha1 new file mode 100644 index 0000000000000..7bc4213520498 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-tcnative-classes-2.0.66.Final.jar.sha1 @@ -0,0 +1 @@ +9588bd2f891157538a78d86c945aa34bf9308dda \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-transport-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-transport-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..f3b714539e61b --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-transport-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +5a27232e5d08218722d94ca14f0b1b4576e7711c \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..d53656cd3b7dc --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +376ce95507066f0e755d97c1c8bcd6c33f657617 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 b/plugins/arrow-flight-rpc/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..f1562364e2848 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +9da25a94e6a0edac90da0bc7894e5a54efcb866b \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/parquet-arrow-1.13.1.jar.sha1 b/plugins/arrow-flight-rpc/licenses/parquet-arrow-1.13.1.jar.sha1 new file mode 100644 index 0000000000000..a1b89891ca8e1 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/parquet-arrow-1.13.1.jar.sha1 @@ -0,0 +1 @@ +9e59add52791af8b05c1aefe2a2f8865602c9368 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/parquet-arrow-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/parquet-arrow-LICENSE.txt new file mode 100644 index 0000000000000..b0065815a5e92 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/parquet-arrow-LICENSE.txt @@ -0,0 +1,218 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +-------------------------------------------------------------------------------- + +This product includes code from Apache Avro. + +Copyright: 2014 The Apache Software Foundation. +Home page: https://avro.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Daniel Lemire's JavaFastPFOR project. The +"Lemire" bit packing source code produced by parquet-generator is derived from +the JavaFastPFOR project. + +Copyright: 2013 Daniel Lemire +Home page: http://lemire.me/en/ +Project page: https://github.com/lemire/JavaFastPFOR +License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This product includes code from Apache Spark. + +* dev/merge_parquet_pr.py is based on Spark's dev/merge_spark_pr.py + +Copyright: 2014 The Apache Software Foundation. +Home page: https://spark.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This product includes code from Twitter's ElephantBird project. 
+ +* parquet-hadoop's UnmaterializableRecordCounter.java includes code from + ElephantBird's LzoRecordReader.java + +Copyright: 2012-2014 Twitter +Home page: https://github.com/twitter/elephant-bird +License: http://www.apache.org/licenses/LICENSE-2.0 + diff --git a/plugins/arrow-flight-rpc/licenses/parquet-arrow-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/parquet-arrow-NOTICE.txt new file mode 100644 index 0000000000000..46300d6cd98fd --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/parquet-arrow-NOTICE.txt @@ -0,0 +1,94 @@ + +Apache Parquet Java +Copyright 2014-2024 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +-------------------------------------------------------------------------------- + +This product includes parquet-tools, initially developed at ARRIS, Inc. with +the following copyright notice: + + Copyright 2013 ARRIS, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +This product includes parquet-protobuf, initially developed by Lukas Nalezenc +with the following copyright notice: + + Copyright 2013 Lukas Nalezenec. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +This product includes code from Apache Avro, which includes the following in +its NOTICE file: + + Apache Avro + Copyright 2010-2015 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + +-------------------------------------------------------------------------------- + +This project includes code from Kite, developed at Cloudera, Inc. with +the following copyright notice: + +| Copyright 2013 Cloudera Inc. +| +| Licensed under the Apache License, Version 2.0 (the "License"); +| you may not use this file except in compliance with the License. +| You may obtain a copy of the License at +| +| http://www.apache.org/licenses/LICENSE-2.0 +| +| Unless required by applicable law or agreed to in writing, software +| distributed under the License is distributed on an "AS IS" BASIS, +| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +| See the License for the specific language governing permissions and +| limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from Netflix, Inc. 
with the following copyright +notice: + +| Copyright 2016 Netflix, Inc. +| +| Licensed under the Apache License, Version 2.0 (the "License"); +| you may not use this file except in compliance with the License. +| You may obtain a copy of the License at +| +| http://www.apache.org/licenses/LICENSE-2.0 +| +| Unless required by applicable law or agreed to in writing, software +| distributed under the License is distributed on an "AS IS" BASIS, +| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +| See the License for the specific language governing permissions and +| limitations under the License. + diff --git a/plugins/arrow-flight-rpc/licenses/perfmark-api-0.27.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/perfmark-api-0.27.0.jar.sha1 new file mode 100644 index 0000000000000..c85ee41fd9bbd --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/perfmark-api-0.27.0.jar.sha1 @@ -0,0 +1 @@ +f86f575a41b091786a4b027cd9c0c1d2e3fc1c01 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/perfmark-api-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/perfmark-api-LICENSE.txt new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/perfmark-api-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/arrow-flight-rpc/licenses/perfmark-api-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/perfmark-api-NOTICE.txt new file mode 100644 index 0000000000000..04fbb4e692e51 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/perfmark-api-NOTICE.txt @@ -0,0 +1,41 @@ + +Copyright 2019 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +----------------------------------------------------------------------- + +This product contains a modified portion of 'Catapult', an open source +Trace Event viewer for Chome, Linux, and Android applications, which can +be obtained at: + + * LICENSE: + * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/catapult/LICENSE (New BSD License) + * HOMEPAGE: + * https://github.com/catapult-project/catapult + +This product contains a modified portion of 'Polymer', a library for Web +Components, which can be obtained at: + * LICENSE: + * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/polymer/LICENSE (New BSD License) + * HOMEPAGE: + * https://github.com/Polymer/polymer + + +This product contains a modified portion of 'ASM', an open source +Java Bytecode library, which can be obtained at: + + * LICENSE: + * agent/src/main/resources/io/perfmark/agent/third_party/asm/LICENSE (BSD style License) + * HOMEPAGE: + * https://asm.ow2.io/ \ No newline at end of file diff --git a/libs/arrow-spi/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/arrow-flight-rpc/licenses/slf4j-api-1.7.36.jar.sha1 similarity index 100% rename from libs/arrow-spi/licenses/slf4j-api-1.7.36.jar.sha1 rename to plugins/arrow-flight-rpc/licenses/slf4j-api-1.7.36.jar.sha1 diff --git a/libs/arrow-spi/licenses/slf4j-api-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/slf4j-api-LICENSE.txt similarity index 100% rename from libs/arrow-spi/licenses/slf4j-api-LICENSE.txt rename to plugins/arrow-flight-rpc/licenses/slf4j-api-LICENSE.txt diff --git a/plugins/arrow-flight-rpc/licenses/slf4j-api-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java b/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java new file mode 100644 index 0000000000000..bcad335c7a917 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight; + +import org.apache.arrow.flight.CallOptions; +import org.apache.arrow.flight.FlightClient; +import org.opensearch.arrow.flight.bootstrap.FlightClientManager; +import org.opensearch.arrow.flight.bootstrap.FlightService; +import org.opensearch.arrow.flight.bootstrap.FlightStreamPlugin; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.BeforeClass; + +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 5) +public class ArrowFlightServerIT extends OpenSearchIntegTestCase { + + private FlightClientManager flightClientManager; + + @BeforeClass + public static void setupFeatureFlags() { + FeatureFlagSetter.set(FeatureFlags.ARROW_STREAMS_SETTING.getKey()); + } + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(FlightStreamPlugin.class); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + ensureGreen(); + Thread.sleep(1000); + FlightService flightService = internalCluster().getInstance(FlightService.class); + flightClientManager = flightService.getFlightClientManager(); + } + + public void testArrowFlightEndpoint() throws Exception { + for (DiscoveryNode node : getClusterState().nodes()) { + try (FlightClient flightClient = flightClientManager.getFlightClient(node.getId()).get()) { + assertNotNull(flightClient); + flightClient.handshake(CallOptions.timeout(5000L, TimeUnit.MILLISECONDS)); + } + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightClient.java b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightClient.java new file mode 100644 index 0000000000000..0efafd370c651 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightClient.java @@ -0,0 +1,250 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.arrow.flight; + +import io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.NettyChannelBuilder; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.ServerChannel; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; +import java.io.InputStream; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import javax.net.ssl.SSLException; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.util.Preconditions; + +/** + * Clone of {@link org.apache.arrow.flight.FlightClient} to support setting SslContext and other settings like SslContext, workerELG, + * executorService and channelType directly. It can be discarded once FlightClient.Builder supports setting SslContext directly. + * Note: This file needs to be cloned with version upgrade of arrow flight-core with above changes. + */ +public class OSFlightClient { + /** A builder for Flight clients. 
*/ + public final static class Builder { + private BufferAllocator allocator; + private Location location; + private boolean forceTls = false; + private int maxInboundMessageSize = OSFlightServer.MAX_GRPC_MESSAGE_SIZE; + private InputStream trustedCertificates = null; + private InputStream clientCertificate = null; + private InputStream clientKey = null; + private String overrideHostname = null; + private List middleware = new ArrayList<>(); + private boolean verifyServer = true; + + private EventLoopGroup workerELG; + private ExecutorService executorService; + private Class channelType; + private SslContext sslContext; + + private Builder() {} + + Builder(BufferAllocator allocator, Location location) { + this.allocator = Preconditions.checkNotNull(allocator); + this.location = Preconditions.checkNotNull(location); + } + + /** Force the client to connect over TLS. */ + public Builder useTls() { + this.forceTls = true; + return this; + } + + /** Override the hostname checked for TLS. Use with caution in production. */ + public Builder overrideHostname(final String hostname) { + this.overrideHostname = hostname; + return this; + } + + /** Set the maximum inbound message size. */ + public Builder maxInboundMessageSize(int maxSize) { + Preconditions.checkArgument(maxSize > 0); + this.maxInboundMessageSize = maxSize; + return this; + } + + /** Set the trusted TLS certificates. */ + public Builder trustedCertificates(final InputStream stream) { + this.trustedCertificates = Preconditions.checkNotNull(stream); + return this; + } + + /** Set the trusted TLS certificates. */ + public Builder clientCertificate( + final InputStream clientCertificate, final InputStream clientKey) { + Preconditions.checkNotNull(clientKey); + this.clientCertificate = Preconditions.checkNotNull(clientCertificate); + this.clientKey = Preconditions.checkNotNull(clientKey); + return this; + } + + public Builder allocator(BufferAllocator allocator) { + this.allocator = Preconditions.checkNotNull(allocator); + return this; + } + + public Builder location(Location location) { + this.location = Preconditions.checkNotNull(location); + return this; + } + + public Builder intercept(FlightClientMiddleware.Factory factory) { + middleware.add(factory); + return this; + } + + public Builder verifyServer(boolean verifyServer) { + this.verifyServer = verifyServer; + return this; + } + + /** Create the client from this builder. 
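+         * <p>
+         * Supported location schemes are {@code LocationSchemes.GRPC}, {@code LocationSchemes.GRPC_INSECURE},
+         * {@code LocationSchemes.GRPC_TLS} and {@code LocationSchemes.GRPC_DOMAIN_SOCKET}. TLS is applied when
+         * {@code useTls()} was called or the location uses the TLS scheme, and an explicitly supplied
+         * {@code SslContext} takes precedence over the trusted/client certificate streams.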
*/ + public FlightClient build() { + final NettyChannelBuilder builder; + + switch (location.getUri().getScheme()) { + case LocationSchemes.GRPC: + case LocationSchemes.GRPC_INSECURE: + case LocationSchemes.GRPC_TLS: + { + builder = NettyChannelBuilder.forAddress(location.toSocketAddress()); + break; + } + case LocationSchemes.GRPC_DOMAIN_SOCKET: + { + // The implementation is platform-specific, so we have to find the classes at runtime + builder = NettyChannelBuilder.forAddress(location.toSocketAddress()); + try { + try { + // Linux + builder.channelType( + Class.forName("io.netty.channel.epoll.EpollDomainSocketChannel") + .asSubclass(ServerChannel.class)); + final EventLoopGroup elg = + Class.forName("io.netty.channel.epoll.EpollEventLoopGroup") + .asSubclass(EventLoopGroup.class) + .getDeclaredConstructor() + .newInstance(); + builder.eventLoopGroup(elg); + } catch (ClassNotFoundException e) { + // BSD + builder.channelType( + Class.forName("io.netty.channel.kqueue.KQueueDomainSocketChannel") + .asSubclass(ServerChannel.class)); + final EventLoopGroup elg = + Class.forName("io.netty.channel.kqueue.KQueueEventLoopGroup") + .asSubclass(EventLoopGroup.class) + .getDeclaredConstructor() + .newInstance(); + builder.eventLoopGroup(elg); + } + } catch (ClassNotFoundException + | InstantiationException + | IllegalAccessException + | NoSuchMethodException + | InvocationTargetException e) { + throw new UnsupportedOperationException( + "Could not find suitable Netty native transport implementation for domain socket address."); + } + break; + } + default: + throw new IllegalArgumentException( + "Scheme is not supported: " + location.getUri().getScheme()); + } + + if (this.forceTls || LocationSchemes.GRPC_TLS.equals(location.getUri().getScheme())) { + builder.useTransportSecurity(); + + final boolean hasTrustedCerts = this.trustedCertificates != null; + final boolean hasKeyCertPair = this.clientCertificate != null && this.clientKey != null; + if (!this.verifyServer && (hasTrustedCerts || hasKeyCertPair)) { + throw new IllegalArgumentException( + "FlightClient has been configured to disable server verification, " + + "but certificate options have been specified."); + } + + if (sslContext != null) { + builder.sslContext(sslContext); + } else { + final SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient(); + + if (!this.verifyServer) { + sslContextBuilder.trustManager(InsecureTrustManagerFactory.INSTANCE); + } else if (this.trustedCertificates != null + || this.clientCertificate != null + || this.clientKey != null) { + if (this.trustedCertificates != null) { + sslContextBuilder.trustManager(this.trustedCertificates); + } + if (this.clientCertificate != null && this.clientKey != null) { + sslContextBuilder.keyManager(this.clientCertificate, this.clientKey); + } + } + try { + builder.sslContext(sslContextBuilder.build()); + } catch (SSLException e) { + throw new RuntimeException(e); + } + } + + if (this.overrideHostname != null) { + builder.overrideAuthority(this.overrideHostname); + } + } else { + builder.usePlaintext(); + } + + builder + .maxTraceEvents(0) + .maxInboundMessageSize(maxInboundMessageSize) + .maxInboundMetadataSize(maxInboundMessageSize) + .executor(executorService); + + if (channelType != null) { + builder.channelType(channelType); + } + + if (workerELG != null) { + builder.eventLoopGroup(workerELG); + } + + return new FlightClient(allocator, builder.build(), middleware); + } + + public Builder executor(ExecutorService executorService) { + this.executorService = 
executorService; + return this; + } + + public Builder channelType(Class channelType) { + this.channelType = channelType; + return this; + } + + public Builder eventLoopGroup(EventLoopGroup workerELG) { + this.workerELG = workerELG; + return this; + } + + public Builder sslContext(SslContext sslContext) { + this.sslContext = sslContext; + return this; + } + } + + public static Builder builder() { + return new Builder(); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightServer.java b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightServer.java new file mode 100644 index 0000000000000..77e0e38314b44 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/OSFlightServer.java @@ -0,0 +1,478 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.arrow.flight; + +import io.grpc.Server; +import io.grpc.ServerInterceptors; +import io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.NettyServerBuilder; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.ServerChannel; +import io.netty.handler.ssl.ClientAuth; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.reflect.InvocationTargetException; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.function.Consumer; +import javax.net.ssl.SSLException; + +import org.apache.arrow.flight.auth.ServerAuthHandler; +import org.apache.arrow.flight.auth.ServerAuthInterceptor; +import org.apache.arrow.flight.auth2.Auth2Constants; +import org.apache.arrow.flight.auth2.CallHeaderAuthenticator; +import org.apache.arrow.flight.auth2.ServerCallHeaderAuthMiddleware; +import org.apache.arrow.flight.grpc.ServerBackpressureThresholdInterceptor; +import org.apache.arrow.flight.grpc.ServerInterceptorAdapter; +import org.apache.arrow.flight.grpc.ServerInterceptorAdapter.KeyFactory; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.util.Preconditions; + +/** + * Clone of {@link org.apache.arrow.flight.FlightServer} to support setting SslContext. It can be discarded once FlightServer.Builder supports setting SslContext directly. + *
+ * It changes {@link org.apache.arrow.flight.FlightServer.Builder} to allow hook to configure the NettyServerBuilder. + */ +@SuppressWarnings("removal") +public class OSFlightServer { + /** The maximum size of an individual gRPC message. This effectively disables the limit. */ + static final int MAX_GRPC_MESSAGE_SIZE = Integer.MAX_VALUE; + /** The default number of bytes that can be queued on an output stream before blocking. */ + static final int DEFAULT_BACKPRESSURE_THRESHOLD = 10 * 1024 * 1024; // 10MB + + private static final MethodHandle FLIGHT_SERVER_CTOR_MH; + + static { + FLIGHT_SERVER_CTOR_MH = AccessController.doPrivileged((PrivilegedAction) () -> { + try { + return MethodHandles + .privateLookupIn(FlightServer.class, MethodHandles.lookup()) + .findConstructor(FlightServer.class, MethodType.methodType(void.class, Location.class, Server.class, ExecutorService.class)); + } catch (final NoSuchMethodException | IllegalAccessException ex) { + throw new IllegalStateException("Unable to find the FlightServer constructor to invoke", ex); + }} + ); + } + + /** A builder for Flight servers. */ + public final static class Builder { + private BufferAllocator allocator; + private Location location; + private FlightProducer producer; + private final Map builderOptions; + private ServerAuthHandler authHandler = ServerAuthHandler.NO_OP; + private CallHeaderAuthenticator headerAuthenticator = CallHeaderAuthenticator.NO_OP; + private ExecutorService executor = null; + private int maxInboundMessageSize = MAX_GRPC_MESSAGE_SIZE; + private int maxHeaderListSize = MAX_GRPC_MESSAGE_SIZE; + private int backpressureThreshold = DEFAULT_BACKPRESSURE_THRESHOLD; + private InputStream certChain; + private InputStream key; + private InputStream mTlsCACert; + private SslContext sslContext; + private final List> interceptors; + // Keep track of inserted interceptors + private final Set interceptorKeys; + + Builder() { + builderOptions = new HashMap<>(); + interceptors = new ArrayList<>(); + interceptorKeys = new HashSet<>(); + } + + Builder(BufferAllocator allocator, Location location, FlightProducer producer) { + this(); + this.allocator = Preconditions.checkNotNull(allocator); + this.location = Preconditions.checkNotNull(location); + this.producer = Preconditions.checkNotNull(producer); + } + + /** Create the server for this builder. */ + @SuppressWarnings("unchecked") + public FlightServer build() { + // Add the auth middleware if applicable. 
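+            // When a header authenticator is configured, it is registered as server middleware under the
+            // standard Authorization header key, ahead of the ServerHeaderMiddleware added for every server below.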
+ if (headerAuthenticator != CallHeaderAuthenticator.NO_OP) { + this.middleware( + FlightServerMiddleware.Key.of(Auth2Constants.AUTHORIZATION_HEADER), + new ServerCallHeaderAuthMiddleware.Factory(headerAuthenticator)); + } + + this.middleware(FlightConstants.HEADER_KEY, new ServerHeaderMiddleware.Factory()); + + final NettyServerBuilder builder; + switch (location.getUri().getScheme()) { + case LocationSchemes.GRPC_DOMAIN_SOCKET: + { + // The implementation is platform-specific, so we have to find the classes at runtime + builder = NettyServerBuilder.forAddress(location.toSocketAddress()); + try { + try { + // Linux + builder.channelType( + Class.forName("io.netty.channel.epoll.EpollServerDomainSocketChannel") + .asSubclass(ServerChannel.class)); + final EventLoopGroup elg = + Class.forName("io.netty.channel.epoll.EpollEventLoopGroup") + .asSubclass(EventLoopGroup.class) + .getConstructor() + .newInstance(); + builder.bossEventLoopGroup(elg).workerEventLoopGroup(elg); + } catch (ClassNotFoundException e) { + // BSD + builder.channelType( + Class.forName("io.netty.channel.kqueue.KQueueServerDomainSocketChannel") + .asSubclass(ServerChannel.class)); + final EventLoopGroup elg = + Class.forName("io.netty.channel.kqueue.KQueueEventLoopGroup") + .asSubclass(EventLoopGroup.class) + .getConstructor() + .newInstance(); + builder.bossEventLoopGroup(elg).workerEventLoopGroup(elg); + } + } catch (ClassNotFoundException + | InstantiationException + | IllegalAccessException + | NoSuchMethodException + | InvocationTargetException e) { + throw new UnsupportedOperationException( + "Could not find suitable Netty native transport implementation for domain socket address."); + } + break; + } + case LocationSchemes.GRPC: + case LocationSchemes.GRPC_INSECURE: + { + builder = NettyServerBuilder.forAddress(location.toSocketAddress()); + break; + } + case LocationSchemes.GRPC_TLS: + { + if (certChain == null) { + throw new IllegalArgumentException( + "Must provide a certificate and key to serve gRPC over TLS"); + } + builder = NettyServerBuilder.forAddress(location.toSocketAddress()); + break; + } + default: + throw new IllegalArgumentException( + "Scheme is not supported: " + location.getUri().getScheme()); + } + + if (certChain != null && sslContext == null) { + SslContextBuilder sslContextBuilder = GrpcSslContexts.forServer(certChain, key); + + if (mTlsCACert != null) { + sslContextBuilder.clientAuth(ClientAuth.REQUIRE).trustManager(mTlsCACert); + } + try { + sslContext = sslContextBuilder.build(); + } catch (SSLException e) { + throw new RuntimeException(e); + } finally { + closeMTlsCACert(); + closeCertChain(); + closeKey(); + } + + builder.sslContext(sslContext); + } else if (sslContext != null) { + builder.sslContext(sslContext); + } + + // Share one executor between the gRPC service, DoPut, and Handshake + final ExecutorService exec; + // We only want to have FlightServer close the gRPC executor if we created it here. We should + // not close + // user-supplied executors. 
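+            // Unlike the upstream FlightServer builder, this clone never creates its own executor: build()
+            // fails below if none was supplied via executor(...), and grpcExecutor stays null so the
+            // caller-owned executor is never shut down by FlightServer.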
+ final ExecutorService grpcExecutor; + if (executor != null) { + exec = executor; + grpcExecutor = null; + } else { + throw new IllegalStateException("GRPC executor must be passed to start Flight server."); + } + + final FlightBindingService flightService = + new FlightBindingService(allocator, producer, authHandler, exec); + builder + .executor(exec) + .maxInboundMessageSize(maxInboundMessageSize) + .maxInboundMetadataSize(maxHeaderListSize) + .addService( + ServerInterceptors.intercept( + flightService, + new ServerBackpressureThresholdInterceptor(backpressureThreshold), + new ServerAuthInterceptor(authHandler))); + + // Allow hooking into the gRPC builder. This is not guaranteed to be available on all Arrow + // versions or + // Flight implementations. + builderOptions.computeIfPresent( + "grpc.builderConsumer", + (key, builderConsumer) -> { + final Consumer consumer = + (Consumer) builderConsumer; + consumer.accept(builder); + return null; + }); + + // Allow explicitly setting some Netty-specific options + builderOptions.computeIfPresent( + "netty.channelType", + (key, channelType) -> { + builder.channelType((Class) channelType); + return null; + }); + builderOptions.computeIfPresent( + "netty.bossEventLoopGroup", + (key, elg) -> { + builder.bossEventLoopGroup((EventLoopGroup) elg); + return null; + }); + builderOptions.computeIfPresent( + "netty.workerEventLoopGroup", + (key, elg) -> { + builder.workerEventLoopGroup((EventLoopGroup) elg); + return null; + }); + + builder.intercept(new ServerInterceptorAdapter(interceptors)); + + try { + return (FlightServer)FLIGHT_SERVER_CTOR_MH.invoke(location, builder.build(), grpcExecutor); + } catch (final Throwable ex) { + throw new IllegalStateException("Unable to instantiate FlightServer", ex); + } + } + + public Builder channelType(Class channelType) { + builderOptions.put("netty.channelType", channelType); + return this; + } + + public Builder workerEventLoopGroup(EventLoopGroup workerELG) { + builderOptions.put("netty.workerEventLoopGroup", workerELG); + return this; + } + + public Builder bossEventLoopGroup(EventLoopGroup bossELG) { + builderOptions.put("netty.bossEventLoopGroup", bossELG); + return this; + } + + public Builder setMaxHeaderListSize(int maxHeaderListSize) { + this.maxHeaderListSize = maxHeaderListSize; + return this; + } + + /** + * Set the maximum size of a message. Defaults to "unlimited", depending on the underlying + * transport. + */ + public Builder maxInboundMessageSize(int maxMessageSize) { + this.maxInboundMessageSize = maxMessageSize; + return this; + } + + /** + * Set the number of bytes that may be queued on a server output stream before writes are + * blocked. + */ + public Builder backpressureThreshold(int backpressureThreshold) { + Preconditions.checkArgument(backpressureThreshold > 0); + this.backpressureThreshold = backpressureThreshold; + return this; + } + + /** + * A small utility function to ensure that InputStream attributes. are closed if they are not + * null + * + * @param stream The InputStream to close (if it is not null). + */ + private void closeInputStreamIfNotNull(InputStream stream) { + if (stream != null) { + try { + stream.close(); + } catch (IOException expected) { + // stream closes gracefully, doesn't expect an exception. + } + } + } + + /** + * A small utility function to ensure that the certChain attribute is closed if it is not null. + * It then sets the attribute to null. 
+ */ + private void closeCertChain() { + closeInputStreamIfNotNull(certChain); + certChain = null; + } + + /** + * A small utility function to ensure that the key attribute is closed if it is not null. It + * then sets the attribute to null. + */ + private void closeKey() { + closeInputStreamIfNotNull(key); + key = null; + } + + /** + * A small utility function to ensure that the mTlsCACert attribute is closed if it is not null. + * It then sets the attribute to null. + */ + private void closeMTlsCACert() { + closeInputStreamIfNotNull(mTlsCACert); + mTlsCACert = null; + } + + /** + * Enable TLS on the server. + * + * @param certChain The certificate chain to use. + * @param key The private key to use. + */ + public Builder useTls(final File certChain, final File key) throws IOException { + closeCertChain(); + this.certChain = new FileInputStream(certChain); + + closeKey(); + this.key = new FileInputStream(key); + + return this; + } + + /** + * Enable Client Verification via mTLS on the server. + * + * @param mTlsCACert The CA certificate to use for verifying clients. + */ + public Builder useMTlsClientVerification(final File mTlsCACert) throws IOException { + closeMTlsCACert(); + this.mTlsCACert = new FileInputStream(mTlsCACert); + return this; + } + + /** + * Enable TLS on the server. + * + * @param certChain The certificate chain to use. + * @param key The private key to use. + */ + public Builder useTls(final InputStream certChain, final InputStream key) throws IOException { + closeCertChain(); + this.certChain = certChain; + + closeKey(); + this.key = key; + + return this; + } + + /** + * Enable mTLS on the server. + * + * @param mTlsCACert The CA certificate to use for verifying clients. + */ + public Builder useMTlsClientVerification(final InputStream mTlsCACert) throws IOException { + closeMTlsCACert(); + this.mTlsCACert = mTlsCACert; + return this; + } + + /** + * Set the executor used by the server. + * + *
Flight will NOT take ownership of the executor. The application must clean it up if one is + * provided. (If not provided, Flight will use a default executor which it will clean up.) + */ + public Builder executor(ExecutorService executor) { + this.executor = executor; + return this; + } + + /** Set the authentication handler. */ + public Builder authHandler(ServerAuthHandler authHandler) { + this.authHandler = authHandler; + return this; + } + + /** Set the header-based authentication mechanism. */ + public Builder headerAuthenticator(CallHeaderAuthenticator headerAuthenticator) { + this.headerAuthenticator = headerAuthenticator; + return this; + } + + /** Provide a transport-specific option. Not guaranteed to have any effect. */ + public Builder transportHint(final String key, Object option) { + builderOptions.put(key, option); + return this; + } + + /** + * Add a Flight middleware component to inspect and modify requests to this service. + * + * @param key An identifier for this middleware component. Service implementations can retrieve + * the middleware instance for the current call using {@link + * org.apache.arrow.flight.FlightProducer.CallContext}. + * @param factory A factory for the middleware. + * @param The middleware type. + * @throws IllegalArgumentException if the key already exists + */ + public Builder middleware( + final FlightServerMiddleware.Key key, final FlightServerMiddleware.Factory factory) { + if (interceptorKeys.contains(key.key)) { + throw new IllegalArgumentException("Key already exists: " + key.key); + } + interceptors.add(new KeyFactory<>(key, factory)); + interceptorKeys.add(key.key); + return this; + } + + public Builder allocator(BufferAllocator allocator) { + this.allocator = Preconditions.checkNotNull(allocator); + return this; + } + + public Builder location(Location location) { + this.location = Preconditions.checkNotNull(location); + return this; + } + + public Builder producer(FlightProducer producer) { + this.producer = Preconditions.checkNotNull(producer); + return this; + } + + public Builder sslContext(SslContext sslContext) { + this.sslContext = sslContext; + return this; + } + } + + public static Builder builder() { + return new Builder(); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/package-info.java new file mode 100644 index 0000000000000..789a88a2d1159 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/apache/arrow/flight/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Clone of FlightServer and FlightClient due to package private access of + * certain configurations. + */ +package org.apache.arrow.flight; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java new file mode 100644 index 0000000000000..529bee72c708d --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.arrow.flight.api; + +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.transport.client.node.NodeClient; + +import java.util.List; + +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * It handles GET requests for retrieving Flight server information. + */ +public class FlightServerInfoAction extends BaseRestHandler { + + /** + * Constructor for FlightServerInfoAction. + */ + public FlightServerInfoAction() {} + + /** + * Returns the name of the action. + * @return The name of the action. + */ + @Override + public String getName() { + return "flight_server_info_action"; + } + + /** + * Returns the list of routes for the action. + * @return The list of routes for the action. + */ + @Override + public List routes() { + return List.of(new Route(GET, "/_flight/info"), new Route(GET, "/_flight/info/{nodeId}")); + } + + /** + * Prepares the request for the action. + * @param request The REST request. + * @param client The node client. + * @return The rest channel consumer. + */ + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + String nodeId = request.param("nodeId"); + if (nodeId != null) { + // Query specific node + NodesFlightInfoRequest nodesRequest = new NodesFlightInfoRequest(nodeId); + return channel -> client.execute(NodesFlightInfoAction.INSTANCE, nodesRequest, new RestToXContentListener<>(channel)); + } else { + NodesFlightInfoRequest nodesRequest = new NodesFlightInfoRequest(); + return channel -> client.execute(NodesFlightInfoAction.INSTANCE, nodesRequest, new RestToXContentListener<>(channel)); + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java new file mode 100644 index 0000000000000..e804b0c518523 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Represents the response for a node's flight information. + */ +public class NodeFlightInfo extends BaseNodeResponse implements ToXContentObject { + private final BoundTransportAddress boundAddress; + + /** + * Constructor for NodeFlightInfo. + * @param in The stream input to read from. + * @throws IOException If an I/O error occurs. + */ + public NodeFlightInfo(StreamInput in) throws IOException { + super(in); + boundAddress = new BoundTransportAddress(in); + } + + /** + * Constructor for NodeFlightInfo. + * @param node The discovery node. + * @param boundAddress The bound transport address. 
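+     * <p>
+     * Rendered through {@code toXContent}, a node's flight info has roughly this shape (host and
+     * port are illustrative):
+     * <pre>
+     *   "flight_server": {
+     *     "bound_addresses": [ { "host": "127.0.0.1", "port": 47470 } ],
+     *     "publish_address": { "host": "127.0.0.1", "port": 47470 }
+     *   }
+     * </pre>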
+ */ + public NodeFlightInfo(DiscoveryNode node, BoundTransportAddress boundAddress) { + super(node); + this.boundAddress = boundAddress; + } + + /** + * Writes the node flight information to the stream. + * @param out The stream output to write to. + * @throws IOException If an I/O error occurs. + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + boundAddress.writeTo(out); + } + + /** + * Returns the bound transport address. + * @return The bound transport address. + */ + public BoundTransportAddress getBoundAddress() { + return boundAddress; + } + + /** + * Converts the node flight information to XContent. + * @param builder The XContent builder. + * @param params The parameters for the XContent conversion. + * @return The XContent builder. + * @throws IOException If an I/O error occurs. + */ + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.startObject("flight_server"); + + builder.startArray("bound_addresses"); + for (TransportAddress address : boundAddress.boundAddresses()) { + builder.startObject(); + builder.field("host", address.address().getHostString()); + builder.field("port", address.address().getPort()); + builder.endObject(); + } + builder.endArray(); + + TransportAddress publishAddress = boundAddress.publishAddress(); + builder.startObject("publish_address"); + builder.field("host", publishAddress.address().getHostString()); + builder.field("port", publishAddress.address().getPort()); + builder.endObject(); + + builder.endObject(); + builder.endObject(); + return builder; + } + +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java new file mode 100644 index 0000000000000..3148c58a1509d --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.action.ActionType; + +/** + * Action to retrieve flight info from nodes + */ +public class NodesFlightInfoAction extends ActionType { + /** + * Singleton instance of NodesFlightInfoAction. + */ + public static final NodesFlightInfoAction INSTANCE = new NodesFlightInfoAction(); + /** + * Name of this action. + */ + public static final String NAME = "cluster:admin/flight/info"; + + NodesFlightInfoAction() { + super(NAME, NodesFlightInfoResponse::new); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java new file mode 100644 index 0000000000000..1b707f461819c --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; + +/** + * Flight Info Request + */ +public class NodesFlightInfoRequest extends BaseNodesRequest { + + /** + * Constructor for NodesFlightInfoRequest + * @param in StreamInput + * @throws IOException If an I/O error occurs + */ + public NodesFlightInfoRequest(StreamInput in) throws IOException { + super(in); + } + + /** + * Constructor for NodesFlightInfoRequest + * @param nodesIds String array of node IDs + */ + public NodesFlightInfoRequest(String... nodesIds) { + super(nodesIds); + } + + /** + * Writes the request to the given StreamOutput + */ + public static class NodeFlightInfoRequest extends TransportRequest { + NodesFlightInfoRequest request; + + /** + * Constructor for NodeFlightInfoRequest + * @param in StreamInput to read from + * @throws IOException If an I/O error occurs + */ + public NodeFlightInfoRequest(StreamInput in) throws IOException { + super(in); + } + + /** + * Constructor for NodeFlightInfoRequest + * @param request NodesFlightInfoRequest + */ + public NodeFlightInfoRequest(NodesFlightInfoRequest request) { + this.request = request; + } + } + + /** + * Writes the request to the given StreamOutput + * @param out StreamOutput to write to + * @throws IOException If an I/O error occurs + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java new file mode 100644 index 0000000000000..721cd631924bd --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; + +/** + * Represents the response for nodes flight information. + */ +public class NodesFlightInfoResponse extends BaseNodesResponse implements ToXContentObject { + /** + * Constructs a new NodesFlightInfoResponse instance. + * + * @param in The stream input to read from. + * @throws IOException If an I/O error occurs. + */ + public NodesFlightInfoResponse(StreamInput in) throws IOException { + super(in); + } + + /** + * Constructs a new NodesFlightInfoResponse instance. + * + * @param clusterName The cluster name. + * @param nodes The list of node flight information. + * @param failures The list of failed node exceptions. 
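+     * <p>
+     * Serialized through {@code toXContent}, a response has roughly this shape (ids and values are
+     * illustrative); a {@code failures} array is appended only when node-level failures occurred:
+     * <pre>
+     *   {
+     *     "_nodes": { "total": 1, "successful": 1, "failed": 0 },
+     *     "cluster_name": "my-cluster",
+     *     "nodes": { "node-1": { "flight_server": { ... } } }
+     *   }
+     * </pre>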
+ */ + public NodesFlightInfoResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); + } + + /** + * Reads the nodes from the given stream input. + * + * @param in The stream input to read from. + * @return The list of node flight information. + * @throws IOException If an I/O error occurs. + */ + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodeFlightInfo::new); + } + + /** + * Writes the nodes to the given stream output. + * + * @param out The stream output to write to. + * @param nodes The list of node flight information. + * @throws IOException If an I/O error occurs. + */ + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeList(nodes); + } + + /** + * Converts the nodes flight information response to XContent. + * @param builder The XContent builder. + * @param params The parameters for the XContent conversion. + * @return The XContent builder. + * @throws IOException If an I/O error occurs. + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + + builder.startObject(); + builder.startObject("_nodes"); + builder.field("total", getNodes().size()); + builder.field("successful", getNodes().size()); + builder.field("failed", failures().size()); + builder.endObject(); + + builder.field("cluster_name", getClusterName().value()); + + builder.startObject("nodes"); + for (NodeFlightInfo nodeInfo : getNodes()) { + builder.field(nodeInfo.getNode().getId()); + nodeInfo.toXContent(builder, params); + } + builder.endObject(); + + if (!failures().isEmpty()) { + builder.startArray("failures"); + for (FailedNodeException failure : failures()) { + builder.startObject(); + builder.field("node_id", failure.nodeId()); + builder.field("reason", failure.getMessage()); + builder.endObject(); + } + builder.endArray(); + } + + builder.endObject(); + return builder; + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java new file mode 100644 index 0000000000000..d4722e20d1f84 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.arrow.flight.bootstrap.FlightService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** + * Transport action for getting flight information from nodes + */ +public class TransportNodesFlightInfoAction extends TransportNodesAction< + NodesFlightInfoRequest, + NodesFlightInfoResponse, + NodesFlightInfoRequest.NodeFlightInfoRequest, + NodeFlightInfo> { + + private final FlightService flightService; + + /** + * Constructor for TransportNodesFlightInfoAction + * @param settings The settings for the action + * @param threadPool The thread pool for the action + * @param clusterService The cluster service for the action + * @param transportService The transport service for the action + * @param actionFilters The action filters for the action + * @param flightService The flight service for the action + */ + @Inject + public TransportNodesFlightInfoAction( + Settings settings, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + FlightService flightService + ) { + super( + NodesFlightInfoAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + NodesFlightInfoRequest::new, + NodesFlightInfoRequest.NodeFlightInfoRequest::new, + ThreadPool.Names.MANAGEMENT, + NodeFlightInfo.class + ); + this.flightService = flightService; + } + + /** + * Creates a new response object for the action. + * @param request The associated request. + * @param nodeFlightInfos All successful node-level responses. + * @param failures All node-level failures. + * @return The response object. + */ + @Override + protected NodesFlightInfoResponse newResponse( + NodesFlightInfoRequest request, + List nodeFlightInfos, + List failures + ) { + return new NodesFlightInfoResponse(clusterService.getClusterName(), nodeFlightInfos, failures); + } + + /** + * Creates a new request object for a node. + * @param request The associated request. + * @return The request object. + */ + @Override + protected NodesFlightInfoRequest.NodeFlightInfoRequest newNodeRequest(NodesFlightInfoRequest request) { + return new NodesFlightInfoRequest.NodeFlightInfoRequest(request); + } + + /** + * Creates a new response object for a node. + * @param in The stream input to read from. + * @return The response object. + */ + @Override + protected NodeFlightInfo newNodeResponse(StreamInput in) throws IOException { + return new NodeFlightInfo(in); + } + + /** + * Creates a new response object for a node. + * @param request The associated request. + * @return The response object. 
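+     * <p>
+     * The node-level operation answers with the local node and the Flight server's bound and publish
+     * addresses obtained from {@link FlightService#getBoundAddress()}.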
+ */ + @Override + protected NodeFlightInfo nodeOperation(NodesFlightInfoRequest.NodeFlightInfoRequest request) { + return new NodeFlightInfo(clusterService.localNode(), flightService.getBoundAddress()); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java new file mode 100644 index 0000000000000..d89ec87f9a51e --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Action to retrieve flight info from nodes + */ +package org.opensearch.arrow.flight.api; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java new file mode 100644 index 0000000000000..a81033f580a03 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java @@ -0,0 +1,252 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.arrow.flight.bootstrap; + +import org.apache.arrow.flight.FlightClient; +import org.apache.arrow.flight.Location; +import org.apache.arrow.flight.OSFlightClient; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.util.VisibleForTesting; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.Version; +import org.opensearch.arrow.flight.api.NodeFlightInfo; +import org.opensearch.arrow.flight.api.NodesFlightInfoAction; +import org.opensearch.arrow.flight.api.NodesFlightInfoRequest; +import org.opensearch.arrow.flight.api.NodesFlightInfoResponse; +import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterStateListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.client.Client; + +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +import io.netty.channel.EventLoopGroup; + +import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS_SETTING; + +/** + * Manages Flight client connections to OpenSearch nodes in a cluster. + * This class maintains a pool of Flight clients for internode communication, + * handles client lifecycle, and responds to cluster state changes. + * + *
+ * The manager implements ClusterStateListener to automatically update
+ * client connections when nodes join or leave the cluster.
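+ * <p>
+ * A rough usage sketch (the node id is illustrative): {@link #buildClientAsync(String)} creates a
+ * connection in the background and {@link #getFlightClient(String)} returns it once available:
+ * <pre>
+ *   manager.buildClientAsync("node-1");
+ *   manager.getFlightClient("node-1")
+ *       .ifPresent(c -> c.handshake(CallOptions.timeout(5000L, TimeUnit.MILLISECONDS)));
+ * </pre>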
+ */ +public class FlightClientManager implements ClusterStateListener, AutoCloseable { + private static final Version MIN_SUPPORTED_VERSION = Version.V_3_0_0; + private static final Logger logger = LogManager.getLogger(FlightClientManager.class); + static final int LOCATION_TIMEOUT_MS = 1000; + private final ExecutorService grpcExecutor; + private final ClientConfiguration clientConfig; + private final Map flightClients = new ConcurrentHashMap<>(); + private final Client client; + + /** + * Creates a new FlightClientManager instance. + * + * @param allocator Supplier for buffer allocation + * @param clusterService Service for cluster state management + * @param sslContextProvider Provider for SSL/TLS context configuration + * @param elg Event loop group for network operations + * @param threadPool Thread pool for executing tasks asynchronously + * @param client OpenSearch client + */ + public FlightClientManager( + BufferAllocator allocator, + ClusterService clusterService, + @Nullable SslContextProvider sslContextProvider, + EventLoopGroup elg, + ThreadPool threadPool, + Client client + ) { + grpcExecutor = threadPool.executor(ServerConfig.FLIGHT_CLIENT_THREAD_POOL_NAME); + this.clientConfig = new ClientConfiguration( + Objects.requireNonNull(allocator, "BufferAllocator cannot be null"), + Objects.requireNonNull(clusterService, "ClusterService cannot be null"), + sslContextProvider, + Objects.requireNonNull(elg, "EventLoopGroup cannot be null"), + Objects.requireNonNull(grpcExecutor, "ExecutorService cannot be null") + ); + this.client = Objects.requireNonNull(client, "Client cannot be null"); + clusterService.addListener(this); + } + + /** + * Returns a Flight client for a given node ID. + * + * @param nodeId The ID of the node for which to retrieve the Flight client + * @return An OpenSearchFlightClient instance for the specified node + */ + public Optional getFlightClient(String nodeId) { + return Optional.ofNullable(flightClients.get(nodeId)); + } + + /** + * Builds a client for a given nodeId in asynchronous manner + * @param nodeId nodeId of the node to build client for + */ + public void buildClientAsync(String nodeId) { + CompletableFuture locationFuture = new CompletableFuture<>(); + locationFuture.thenAccept(location -> { + DiscoveryNode node = getNodeFromClusterState(nodeId); + buildClientAndAddToPool(location, node); + }).exceptionally(throwable -> { + logger.error("Failed to get Flight server location for node: [{}] {}", nodeId, throwable); + throw new RuntimeException(throwable); + }); + requestNodeLocation(nodeId, locationFuture); + } + + private void buildClientAndAddToPool(Location location, DiscoveryNode node) { + if (!isValidNode(node)) { + logger.warn( + "Unable to build FlightClient for node [{}] with role [{}] on version [{}]", + node.getId(), + node.getRoles(), + node.getVersion() + ); + return; + } + flightClients.computeIfAbsent(node.getId(), key -> buildClient(location)); + } + + private void requestNodeLocation(String nodeId, CompletableFuture future) { + NodesFlightInfoRequest request = new NodesFlightInfoRequest(nodeId); + try { + + client.execute(NodesFlightInfoAction.INSTANCE, request, new ActionListener<>() { + @Override + public void onResponse(NodesFlightInfoResponse response) { + NodeFlightInfo nodeInfo = response.getNodesMap().get(nodeId); + if (nodeInfo != null) { + TransportAddress publishAddress = nodeInfo.getBoundAddress().publishAddress(); + String address = publishAddress.getAddress(); + int flightPort = publishAddress.address().getPort(); + 
Location location = clientConfig.sslContextProvider != null + ? Location.forGrpcTls(address, flightPort) + : Location.forGrpcInsecure(address, flightPort); + + future.complete(location); + } else { + future.completeExceptionally(new IllegalStateException("No Flight info received for node: [" + nodeId + "]")); + } + } + + @Override + public void onFailure(Exception e) { + future.completeExceptionally(e); + logger.error("Failed to get Flight server info for node: [{}] {}", nodeId, e); + } + }); + } catch (final Exception ex) { + future.completeExceptionally(ex); + } + } + + private FlightClient buildClient(Location location) { + return OSFlightClient.builder() + .allocator(clientConfig.allocator) + .location(location) + .channelType(ServerConfig.clientChannelType()) + .eventLoopGroup(clientConfig.workerELG) + .sslContext(clientConfig.sslContextProvider != null ? clientConfig.sslContextProvider.getClientSslContext() : null) + .executor(clientConfig.grpcExecutor) + .build(); + } + + private DiscoveryNode getNodeFromClusterState(String nodeId) { + return Objects.requireNonNull(clientConfig.clusterService).state().nodes().get(nodeId); + } + + /** + * Closes the FlightClientManager and all associated Flight clients. + */ + @Override + public void close() throws Exception { + for (FlightClient flightClient : flightClients.values()) { + flightClient.close(); + } + flightClients.clear(); + grpcExecutor.shutdown(); + grpcExecutor.awaitTermination(5, TimeUnit.SECONDS); + clientConfig.clusterService.removeListener(this); + } + + /** + * Returns the ID of the local node in the cluster. + * + * @return String representing the local node ID + */ + public String getLocalNodeId() { + return Objects.requireNonNull(clientConfig.clusterService).state().nodes().getLocalNodeId(); + } + + /** + * Handles cluster state changes by updating node locations and managing client connections. 
+ * + * @param event The ClusterChangedEvent containing information about the cluster state change + */ + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (event.nodesChanged()) { + DiscoveryNodes nodes = event.state().nodes(); + flightClients.keySet().removeIf(nodeId -> !nodes.nodeExists(nodeId)); + for (DiscoveryNode node : nodes) { + if (!flightClients.containsKey(node.getId()) && isValidNode(node)) { + buildClientAsync(node.getId()); + } + } + } + } + + private static boolean isValidNode(DiscoveryNode node) { + return node != null && !node.getVersion().before(MIN_SUPPORTED_VERSION) && FeatureFlags.isEnabled(ARROW_STREAMS_SETTING); + } + + private Set getCurrentClusterNodes() { + return Objects.requireNonNull(clientConfig.clusterService).state().nodes().getNodes().keySet(); + } + + @VisibleForTesting + Map getFlightClients() { + return flightClients; + } + + private record ClientConfiguration(BufferAllocator allocator, ClusterService clusterService, SslContextProvider sslContextProvider, + EventLoopGroup workerELG, ExecutorService grpcExecutor) { + private ClientConfiguration( + BufferAllocator allocator, + ClusterService clusterService, + @Nullable SslContextProvider sslContextProvider, + EventLoopGroup workerELG, + ExecutorService grpcExecutor + ) { + this.allocator = allocator; + this.clusterService = clusterService; + this.sslContextProvider = sslContextProvider; + this.workerELG = workerELG; + this.grpcExecutor = grpcExecutor; + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java new file mode 100644 index 0000000000000..7735fc3df73e0 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java @@ -0,0 +1,170 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap; + +import org.apache.arrow.flight.NoOpFlightProducer; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.util.AutoCloseables; +import org.apache.arrow.util.VisibleForTesting; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.arrow.flight.bootstrap.tls.DefaultSslContextProvider; +import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.arrow.spi.StreamManager; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.client.Client; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Objects; + +/** + * FlightService manages the Arrow Flight server and client for OpenSearch. + * It handles the initialization, startup, and shutdown of the Flight server and client, + * as well as managing the stream operations through a FlightStreamManager. 
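+ * <p>
+ * {@code doStart()} allocates a shared root Arrow allocator, configures TLS when enabled, creates the
+ * {@link FlightClientManager} (sharing the allocator and worker event loop group with the server) and
+ * starts the server components; {@code doStop()} closes all of these again.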
+ */ +public class FlightService extends NetworkPlugin.AuxTransport { + private static final Logger logger = LogManager.getLogger(FlightService.class); + private final ServerComponents serverComponents; + private StreamManager streamManager; + private Client client; + private FlightClientManager clientManager; + private SecureTransportSettingsProvider secureTransportSettingsProvider; + private BufferAllocator allocator; + private ThreadPool threadPool; + + /** + * Constructor for FlightService. + * @param settings The settings for the FlightService. + */ + public FlightService(Settings settings) { + Objects.requireNonNull(settings, "Settings cannot be null"); + try { + ServerConfig.init(settings); + } catch (Exception e) { + throw new RuntimeException("Failed to initialize Arrow Flight server", e); + } + this.serverComponents = new ServerComponents(settings); + } + + void setClusterService(ClusterService clusterService) { + serverComponents.setClusterService(Objects.requireNonNull(clusterService, "ClusterService cannot be null")); + } + + void setNetworkService(NetworkService networkService) { + serverComponents.setNetworkService(Objects.requireNonNull(networkService, "NetworkService cannot be null")); + } + + void setThreadPool(ThreadPool threadPool) { + this.threadPool = Objects.requireNonNull(threadPool, "ThreadPool cannot be null"); + serverComponents.setThreadPool(threadPool); + } + + void setClient(Client client) { + this.client = client; + } + + void setSecureTransportSettingsProvider(SecureTransportSettingsProvider secureTransportSettingsProvider) { + this.secureTransportSettingsProvider = secureTransportSettingsProvider; + } + + /** + * Starts the FlightService by initializing the stream manager. + */ + @SuppressWarnings("removal") + @Override + protected void doStart() { + try { + allocator = AccessController.doPrivileged((PrivilegedAction) () -> new RootAllocator(Integer.MAX_VALUE)); + serverComponents.setAllocator(allocator); + SslContextProvider sslContextProvider = ServerConfig.isSslEnabled() + ? new DefaultSslContextProvider(secureTransportSettingsProvider) + : null; + serverComponents.setSslContextProvider(sslContextProvider); + serverComponents.initComponents(); + clientManager = new FlightClientManager( + allocator, // sharing the same allocator between server and client + serverComponents.clusterService, + sslContextProvider, + serverComponents.workerEventLoopGroup, // sharing the same worker ELG between server and client + threadPool, + client + ); + initializeStreamManager(clientManager); + serverComponents.setFlightProducer(new NoOpFlightProducer()); + serverComponents.start(); + + } catch (Exception e) { + logger.error("Failed to start Flight server", e); + doClose(); + throw new RuntimeException("Failed to start Flight server", e); + } + } + + /** + * Retrieves the FlightClientManager used by the FlightService. + * @return The FlightClientManager instance. + */ + public FlightClientManager getFlightClientManager() { + return clientManager; + } + + /** + * Retrieves the StreamManager used by the FlightService. + * @return The StreamManager instance. + */ + public StreamManager getStreamManager() { + return streamManager; + } + + /** + * Retrieves the bound address of the FlightService. + * @return The BoundTransportAddress instance. 
+ */ + public BoundTransportAddress getBoundAddress() { + return serverComponents.getBoundAddress(); + } + + @VisibleForTesting + SslContextProvider getSslContextProvider() { + return serverComponents.getSslContextProvider(); + } + + /** + * Stops the FlightService by closing the server components and network resources. + */ + @Override + protected void doStop() { + try { + AutoCloseables.close(serverComponents, streamManager, clientManager, allocator); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * doStop() ensures all resources are cleaned up and resources are recreated on + * doStart() + */ + @Override + protected void doClose() { + doStop(); + } + + private void initializeStreamManager(FlightClientManager clientManager) { + streamManager = null; + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java new file mode 100644 index 0000000000000..bb7edf491cf02 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java @@ -0,0 +1,264 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap; + +import org.opensearch.arrow.flight.api.FlightServerInfoAction; +import org.opensearch.arrow.flight.api.NodesFlightInfoAction; +import org.opensearch.arrow.flight.api.TransportNodesFlightInfoAction; +import org.opensearch.arrow.spi.StreamManager; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.IndexScopedSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.ClusterPlugin; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.StreamManagerPlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.rest.RestController; +import org.opensearch.rest.RestHandler; +import org.opensearch.script.ScriptService; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.Transport; +import org.opensearch.transport.client.Client; +import org.opensearch.watcher.ResourceWatcherService; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import 
java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+/**
+ * FlightStreamPlugin integrates the Arrow Flight service with OpenSearch, registering the stream manager, the Flight
+ * auxiliary transport, and the Flight info REST and transport actions when the Arrow Streams feature flag is enabled.
+ */
+public class FlightStreamPlugin extends Plugin implements StreamManagerPlugin, NetworkPlugin, ActionPlugin, ClusterPlugin {
+
+    private final FlightService flightService;
+    private final boolean isArrowStreamsEnabled;
+
+    /**
+     * Constructor for FlightStreamPlugin.
+     * @param settings The settings for the FlightStreamPlugin.
+     */
+    public FlightStreamPlugin(Settings settings) {
+        this.isArrowStreamsEnabled = FeatureFlags.isEnabled(FeatureFlags.ARROW_STREAMS);
+        this.flightService = isArrowStreamsEnabled ? new FlightService(settings) : null;
+    }
+
+    /**
+     * Creates components for the FlightStream plugin.
+     * @param client The client instance.
+     * @param clusterService The cluster service instance.
+     * @param threadPool The thread pool instance.
+     * @param resourceWatcherService The resource watcher service instance.
+     * @param scriptService The script service instance.
+     * @param xContentRegistry The named XContent registry.
+     * @param environment The environment instance.
+     * @param nodeEnvironment The node environment instance.
+     * @param namedWriteableRegistry The named writeable registry.
+     * @param indexNameExpressionResolver The index name expression resolver instance.
+     * @param repositoriesServiceSupplier The supplier for the repositories service.
+     * @return A collection containing the FlightService, or an empty collection when Arrow Streams is disabled.
+     */
+    @Override
+    public Collection<Object> createComponents(
+        Client client,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ResourceWatcherService resourceWatcherService,
+        ScriptService scriptService,
+        NamedXContentRegistry xContentRegistry,
+        Environment environment,
+        NodeEnvironment nodeEnvironment,
+        NamedWriteableRegistry namedWriteableRegistry,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        Supplier<RepositoriesService> repositoriesServiceSupplier
+    ) {
+        if (!isArrowStreamsEnabled) {
+            return Collections.emptyList();
+        }
+        flightService.setClusterService(clusterService);
+        flightService.setThreadPool(threadPool);
+        flightService.setClient(client);
+        return List.of(flightService);
+    }
+
+    /**
+     * Gets the secure transports for the FlightStream plugin.
+     * @param settings The settings for the plugin.
+     * @param threadPool The thread pool instance.
+     * @param pageCacheRecycler The page cache recycler instance.
+     * @param circuitBreakerService The circuit breaker service instance.
+     * @param namedWriteableRegistry The named writeable registry.
+     * @param networkService The network service instance.
+     * @param secureTransportSettingsProvider The secure transport settings provider.
+     * @param tracer The tracer instance.
+     * @return A map of secure transports.
+     */
+    @Override
+    public Map<String, Supplier<Transport>> getSecureTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        PageCacheRecycler pageCacheRecycler,
+        CircuitBreakerService circuitBreakerService,
+        NamedWriteableRegistry namedWriteableRegistry,
+        NetworkService networkService,
+        SecureTransportSettingsProvider secureTransportSettingsProvider,
+        Tracer tracer
+    ) {
+        if (!isArrowStreamsEnabled) {
+            return Collections.emptyMap();
+        }
+        flightService.setSecureTransportSettingsProvider(secureTransportSettingsProvider);
+        return Collections.emptyMap();
+    }
+
+    /**
+     * Gets the auxiliary transports for the FlightStream plugin.
+     * @param settings The settings for the plugin.
+     * @param threadPool The thread pool instance.
+ * @param circuitBreakerService The circuit breaker service instance. + * @param networkService The network service instance. + * @param clusterSettings The cluster settings instance. + * @param tracer The tracer instance. + * @return A map of auxiliary transports. + */ + @Override + public Map> getAuxTransports( + Settings settings, + ThreadPool threadPool, + CircuitBreakerService circuitBreakerService, + NetworkService networkService, + ClusterSettings clusterSettings, + Tracer tracer + ) { + if (!isArrowStreamsEnabled) { + return Collections.emptyMap(); + } + flightService.setNetworkService(networkService); + return Collections.singletonMap(FlightService.AUX_TRANSPORT_TYPES_KEY, () -> flightService); + } + + /** + * Gets the REST handlers for the FlightStream plugin. + * @param settings The settings for the plugin. + * @param restController The REST controller instance. + * @param clusterSettings The cluster settings instance. + * @param indexScopedSettings The index scoped settings instance. + * @param settingsFilter The settings filter instance. + * @param indexNameExpressionResolver The index name expression resolver instance. + * @param nodesInCluster The supplier for the discovery nodes. + * @return A list of REST handlers. + */ + @Override + public List getRestHandlers( + Settings settings, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster + ) { + if (!isArrowStreamsEnabled) { + return Collections.emptyList(); + } + return List.of(new FlightServerInfoAction()); + } + + /** + * Gets the list of action handlers for the FlightStream plugin. + * @return A list of action handlers. + */ + @Override + public List> getActions() { + if (!isArrowStreamsEnabled) { + return Collections.emptyList(); + } + return List.of(new ActionHandler<>(NodesFlightInfoAction.INSTANCE, TransportNodesFlightInfoAction.class)); + } + + /** + * Called when node is started. DiscoveryNode argument is passed to allow referring localNode value inside plugin + * + * @param localNode local Node info + */ + @Override + public void onNodeStarted(DiscoveryNode localNode) { + if (!isArrowStreamsEnabled) { + return; + } + flightService.getFlightClientManager().buildClientAsync(localNode.getId()); + } + + /** + * Gets the StreamManager instance for managing flight streams. + */ + @Override + public Supplier getStreamManager() { + if (!isArrowStreamsEnabled) { + return null; + } + return flightService::getStreamManager; + } + + /** + * Gets the list of ExecutorBuilder instances for building thread pools used for FlightServer. + * @param settings The settings for the plugin + */ + @Override + public List> getExecutorBuilders(Settings settings) { + if (!isArrowStreamsEnabled) { + return Collections.emptyList(); + } + return List.of(ServerConfig.getServerExecutorBuilder(), ServerConfig.getClientExecutorBuilder()); + } + + /** + * Gets the list of settings for the Flight plugin. 
+ */ + @Override + public List> getSettings() { + if (!isArrowStreamsEnabled) { + return Collections.emptyList(); + } + return new ArrayList<>( + Arrays.asList( + ServerComponents.SETTING_FLIGHT_PORTS, + ServerComponents.SETTING_FLIGHT_HOST, + ServerComponents.SETTING_FLIGHT_BIND_HOST, + ServerComponents.SETTING_FLIGHT_PUBLISH_HOST + ) + ) { + { + addAll(ServerConfig.getSettings()); + } + }; + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerComponents.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerComponents.java new file mode 100644 index 0000000000000..06b8b6bd4d35c --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerComponents.java @@ -0,0 +1,286 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap; + +import org.apache.arrow.flight.FlightProducer; +import org.apache.arrow.flight.FlightServer; +import org.apache.arrow.flight.Location; +import org.apache.arrow.flight.OSFlightServer; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.util.AutoCloseables; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.BindTransportException; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + +import io.netty.channel.EventLoopGroup; +import io.netty.util.NettyRuntime; +import io.netty.util.concurrent.Future; + +import static java.util.Collections.emptyList; +import static org.opensearch.common.settings.Setting.intSetting; +import static org.opensearch.common.settings.Setting.listSetting; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORT; +import static org.opensearch.transport.Transport.resolveTransportPublishPort; + +@SuppressWarnings("removal") +final class ServerComponents implements AutoCloseable { + + public static final Setting> SETTING_FLIGHT_HOST = listSetting( + "arrow.flight.host", + emptyList(), + Function.identity(), + Setting.Property.NodeScope + ); + + public static final Setting> SETTING_FLIGHT_BIND_HOST = listSetting( + "arrow.flight.bind_host", + SETTING_FLIGHT_HOST, + Function.identity(), + Setting.Property.NodeScope + ); + + public static final Setting> SETTING_FLIGHT_PUBLISH_HOST = listSetting( + "arrow.flight.publish_host", + SETTING_FLIGHT_HOST, + Function.identity(), + 
Setting.Property.NodeScope + ); + + public static final Setting SETTING_FLIGHT_PUBLISH_PORT = intSetting( + "arrow.flight.publish_port", + -1, + -1, + Setting.Property.NodeScope + ); + + private static final Logger logger = LogManager.getLogger(ServerComponents.class); + + private static final String GRPC_WORKER_ELG = "os-grpc-worker-ELG"; + private static final String GRPC_BOSS_ELG = "os-grpc-boss-ELG"; + private static final int SHUTDOWN_TIMEOUT_SECONDS = 5; + + public static final String FLIGHT_TRANSPORT_SETTING_KEY = "transport-flight"; + public static final Setting SETTING_FLIGHT_PORTS = AUX_TRANSPORT_PORT.getConcreteSettingForNamespace( + FLIGHT_TRANSPORT_SETTING_KEY + ); + + private final Settings settings; + private final PortsRange port; + private final String[] bindHosts; + private final String[] publishHosts; + private volatile BoundTransportAddress boundAddress; + + private FlightServer server; + private BufferAllocator allocator; + ClusterService clusterService; + private NetworkService networkService; + private ThreadPool threadPool; + private SslContextProvider sslContextProvider; + private FlightProducer flightProducer; + + private EventLoopGroup bossEventLoopGroup; + EventLoopGroup workerEventLoopGroup; + private ExecutorService serverExecutor; + + ServerComponents(Settings settings) { + this.settings = settings; + this.port = SETTING_FLIGHT_PORTS.get(settings); + + List bindHosts = SETTING_FLIGHT_BIND_HOST.get(settings); + this.bindHosts = bindHosts.toArray(new String[0]); + + List publishHosts = SETTING_FLIGHT_PUBLISH_HOST.get(settings); + this.publishHosts = publishHosts.toArray(new String[0]); + } + + void setAllocator(BufferAllocator allocator) { + this.allocator = allocator; + } + + void setClusterService(ClusterService clusterService) { + this.clusterService = Objects.requireNonNull(clusterService); + } + + void setNetworkService(NetworkService networkService) { + this.networkService = Objects.requireNonNull(networkService); + } + + void setThreadPool(ThreadPool threadPool) { + this.threadPool = Objects.requireNonNull(threadPool); + } + + void setSslContextProvider(@Nullable SslContextProvider sslContextProvider) { + this.sslContextProvider = sslContextProvider; + } + + void setFlightProducer(FlightProducer flightProducer) { + this.flightProducer = Objects.requireNonNull(flightProducer); + } + + private FlightServer buildAndStartServer(Location location, FlightProducer producer) throws IOException { + FlightServer server = OSFlightServer.builder() + .allocator(allocator) + .location(location) + .producer(producer) + .sslContext(sslContextProvider != null ? 
sslContextProvider.getServerSslContext() : null) + .channelType(ServerConfig.serverChannelType()) + .bossEventLoopGroup(bossEventLoopGroup) + .workerEventLoopGroup(workerEventLoopGroup) + .executor(serverExecutor) + .build(); + AccessController.doPrivileged((PrivilegedAction) () -> { + try { + server.start(); + } catch (IOException e) { + throw new RuntimeException(e); + } + return null; + }); + return server; + } + + SslContextProvider getSslContextProvider() { + return sslContextProvider; + } + + BoundTransportAddress getBoundAddress() { + return boundAddress; + } + + void start() { + InetAddress[] hostAddresses; + try { + hostAddresses = networkService.resolveBindHostAddresses(bindHosts); + } catch (IOException e) { + throw new BindTransportException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e); + } + + List boundAddresses = new ArrayList<>(hostAddresses.length); + for (InetAddress address : hostAddresses) { + AccessController.doPrivileged((PrivilegedAction) () -> { + boundAddresses.add(bindAddress(address, port)); + return null; + }); + } + + final InetAddress publishInetAddress; + try { + publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts); + } catch (Exception e) { + throw new BindTransportException("Failed to resolve publish address", e); + } + + final int publishPort = resolveTransportPublishPort(SETTING_FLIGHT_PUBLISH_PORT.get(settings), boundAddresses, publishInetAddress); + + if (publishPort < 0) { + throw new BindTransportException( + "Failed to auto-resolve flight publish port, multiple bound addresses " + + boundAddresses + + " with distinct ports and none of them matched the publish address (" + + publishInetAddress + + "). Please specify a unique port by setting " + + SETTING_FLIGHT_PUBLISH_PORT.getKey() + ); + } + + TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); + this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress); + } + + void initComponents() throws Exception { + bossEventLoopGroup = ServerConfig.createELG(GRPC_BOSS_ELG, 1); + workerEventLoopGroup = ServerConfig.createELG(GRPC_WORKER_ELG, NettyRuntime.availableProcessors() * 2); + serverExecutor = threadPool.executor(ServerConfig.FLIGHT_SERVER_THREAD_POOL_NAME); + } + + @Override + public void close() { + try { + AutoCloseables.close(server); + gracefullyShutdownELG(bossEventLoopGroup, GRPC_BOSS_ELG); + gracefullyShutdownELG(workerEventLoopGroup, GRPC_WORKER_ELG); + if (serverExecutor != null) { + serverExecutor.shutdown(); + } + } catch (Exception e) { + logger.error("Error while closing server components", e); + } + } + + private TransportAddress bindAddress(final InetAddress hostAddress, final PortsRange portsRange) { + final AtomicReference lastException = new AtomicReference<>(); + final AtomicReference boundSocket = new AtomicReference<>(); + final TransportAddress[] address = new TransportAddress[1]; + boolean success = portsRange.iterate(portNumber -> { + boundSocket.set(new InetSocketAddress(hostAddress, portNumber)); + address[0] = new TransportAddress(boundSocket.get()); + try { + return startFlightServer(address[0]); + } catch (Exception e) { + lastException.set(e); + return false; + } + }); + + if (!success) { + throw new BindTransportException("Failed to bind to [" + hostAddress + "]", lastException.get()); + } + return address[0]; + } + + private boolean startFlightServer(TransportAddress transportAddress) { + InetSocketAddress address = 
transportAddress.address(); + Location serverLocation = sslContextProvider != null + ? Location.forGrpcTls(address.getHostString(), address.getPort()) + : Location.forGrpcInsecure(address.getHostString(), address.getPort()); + try { + this.server = buildAndStartServer(serverLocation, flightProducer); + logger.info("Arrow Flight server started. Listening at {}", serverLocation); + return true; + } catch (Exception e) { + String errorMsg = "Failed to start Arrow Flight server at " + serverLocation; + logger.debug(errorMsg, e); + return false; + } + } + + private void gracefullyShutdownELG(EventLoopGroup group, String groupName) { + if (group != null) { + Future shutdownFuture = group.shutdownGracefully(0, SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS); + shutdownFuture.awaitUninterruptibly(); + if (!shutdownFuture.isSuccess()) { + logger.warn("Error closing {} netty event loop group {}", groupName, shutdownFuture.cause()); + } + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerConfig.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerConfig.java new file mode 100644 index 0000000000000..78b8b1dd56a6a --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/ServerConfig.java @@ -0,0 +1,218 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap; + +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.threadpool.ScalingExecutorBuilder; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import io.netty.channel.Channel; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.epoll.Epoll; +import io.netty.channel.epoll.EpollEventLoopGroup; +import io.netty.channel.epoll.EpollServerSocketChannel; +import io.netty.channel.epoll.EpollSocketChannel; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; + +/** + * Configuration class for OpenSearch Flight server settings. + * This class manages server-side configurations including port settings, Arrow memory settings, + * thread pool configurations, and SSL/TLS settings. + */ +public class ServerConfig { + /** + * Creates a new instance of the server configuration with default settings. 
+ */ + public ServerConfig() {} + + static final Setting ARROW_ALLOCATION_MANAGER_TYPE = Setting.simpleString( + "arrow.allocation.manager.type", + "Netty", + Setting.Property.NodeScope + ); + + static final Setting ARROW_ENABLE_NULL_CHECK_FOR_GET = Setting.boolSetting( + "arrow.enable_null_check_for_get", + false, + Setting.Property.NodeScope + ); + + static final Setting ARROW_ENABLE_DEBUG_ALLOCATOR = Setting.boolSetting( + "arrow.memory.debug.allocator", + false, + Setting.Property.NodeScope + ); + + static final Setting ARROW_ENABLE_UNSAFE_MEMORY_ACCESS = Setting.boolSetting( + "arrow.enable_unsafe_memory_access", + true, + Setting.Property.NodeScope + ); + + static final Setting FLIGHT_THREAD_POOL_MIN_SIZE = Setting.intSetting( + "thread_pool.flight-server.min", + 0, + 0, + Setting.Property.NodeScope + ); + + static final Setting FLIGHT_THREAD_POOL_MAX_SIZE = Setting.intSetting( + "thread_pool.flight-server.max", + 100000, // TODO depends on max concurrent streams per node, decide after benchmark. To be controlled by admission control layer. + 1, + Setting.Property.NodeScope + ); + + static final Setting FLIGHT_THREAD_POOL_KEEP_ALIVE = Setting.timeSetting( + "thread_pool.flight-server.keep_alive", + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope + ); + + static final Setting ARROW_SSL_ENABLE = Setting.boolSetting( + "arrow.ssl.enable", + false, // TODO: get default from security enabled + Setting.Property.NodeScope + ); + + /** + * The thread pool name for the Flight server. + */ + public static final String FLIGHT_SERVER_THREAD_POOL_NAME = "flight-server"; + + /** + * The thread pool name for the Flight client. + */ + public static final String FLIGHT_CLIENT_THREAD_POOL_NAME = "flight-client"; + + private static final String host = "localhost"; + private static boolean enableSsl; + private static int threadPoolMin; + private static int threadPoolMax; + private static TimeValue keepAlive; + + /** + * Initializes the server configuration with the provided settings. + * Sets system properties for Arrow memory management and configures thread pool settings. + * + * @param settings The OpenSearch settings to initialize the server with + */ + @SuppressForbidden(reason = "required for arrow allocator") + @SuppressWarnings("removal") + public static void init(Settings settings) { + AccessController.doPrivileged((PrivilegedAction) () -> { + System.setProperty("arrow.allocation.manager.type", ARROW_ALLOCATION_MANAGER_TYPE.get(settings)); + System.setProperty("arrow.enable_null_check_for_get", Boolean.toString(ARROW_ENABLE_NULL_CHECK_FOR_GET.get(settings))); + System.setProperty("arrow.enable_unsafe_memory_access", Boolean.toString(ARROW_ENABLE_UNSAFE_MEMORY_ACCESS.get(settings))); + System.setProperty("arrow.memory.debug.allocator", Boolean.toString(ARROW_ENABLE_DEBUG_ALLOCATOR.get(settings))); + Netty4Configs.init(settings); + return null; + }); + enableSsl = ARROW_SSL_ENABLE.get(settings); + threadPoolMin = FLIGHT_THREAD_POOL_MIN_SIZE.get(settings); + threadPoolMax = FLIGHT_THREAD_POOL_MAX_SIZE.get(settings); + keepAlive = FLIGHT_THREAD_POOL_KEEP_ALIVE.get(settings); + } + + /** + * Checks if SSL/TLS is enabled for the Flight server. + * + * @return true if SSL is enabled, false otherwise + */ + public static boolean isSslEnabled() { + return enableSsl; + } + + /** + * Gets the thread pool executor builder configured for the Flight server. 
+     *
+     * @return The configured ScalingExecutorBuilder instance
+     */
+    public static ScalingExecutorBuilder getServerExecutorBuilder() {
+        return new ScalingExecutorBuilder(FLIGHT_SERVER_THREAD_POOL_NAME, threadPoolMin, threadPoolMax, keepAlive);
+    }
+
+    /**
+     * Gets the thread pool executor builder configured for the Flight client.
+     *
+     * @return The configured ScalingExecutorBuilder instance
+     */
+    public static ScalingExecutorBuilder getClientExecutorBuilder() {
+        return new ScalingExecutorBuilder(FLIGHT_CLIENT_THREAD_POOL_NAME, threadPoolMin, threadPoolMax, keepAlive);
+    }
+
+    /**
+     * Returns a list of all settings managed by this configuration class.
+     *
+     * @return List of Setting instances
+     */
+    public static List<Setting<?>> getSettings() {
+        return new ArrayList<>(
+            Arrays.asList(
+                ARROW_ALLOCATION_MANAGER_TYPE,
+                ARROW_ENABLE_NULL_CHECK_FOR_GET,
+                ARROW_ENABLE_DEBUG_ALLOCATOR,
+                ARROW_ENABLE_UNSAFE_MEMORY_ACCESS,
+                ARROW_SSL_ENABLE
+            )
+        );
+    }
+
+    static EventLoopGroup createELG(String name, int eventLoopThreads) {
+
+        return Epoll.isAvailable()
+            ? new EpollEventLoopGroup(eventLoopThreads, OpenSearchExecutors.daemonThreadFactory(name))
+            : new NioEventLoopGroup(eventLoopThreads, OpenSearchExecutors.daemonThreadFactory(name));
+    }
+
+    static Class<? extends Channel> serverChannelType() {
+        return Epoll.isAvailable() ? EpollServerSocketChannel.class : NioServerSocketChannel.class;
+    }
+
+    static Class<? extends Channel> clientChannelType() {
+        return Epoll.isAvailable() ? EpollSocketChannel.class : NioSocketChannel.class;
+    }
+
+    private static class Netty4Configs {
+
+        @SuppressForbidden(reason = "required for netty allocator configuration")
+        public static void init(Settings settings) {
+            checkSystemProperty("io.netty.allocator.numDirectArenas", "1");
+            checkSystemProperty("io.netty.noUnsafe", "false");
+            checkSystemProperty("io.netty.tryUnsafe", "true");
+            checkSystemProperty("io.netty.tryReflectionSetAccessible", "true");
+        }
+
+        private static void checkSystemProperty(String propertyName, String expectedValue) {
+            String actualValue = System.getProperty(propertyName);
+            if (!expectedValue.equals(actualValue)) {
+                throw new IllegalStateException(
+                    "Required system property ["
+                        + propertyName
+                        + "] is incorrect; expected: ["
+                        + expectedValue
+                        + "] actual: ["
+                        + actualValue
+                        + "]."
+                );
+            }
+        }
+    }
+}
diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/package-info.java
new file mode 100644
index 0000000000000..3ee247809b0c0
--- /dev/null
+++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Bootstrap classes for initializing and configuring OpenSearch Flight service.
+ */ +package org.opensearch.arrow.flight.bootstrap; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/DefaultSslContextProvider.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/DefaultSslContextProvider.java new file mode 100644 index 0000000000000..187124911fc5f --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/DefaultSslContextProvider.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap.tls; + +import org.opensearch.plugins.SecureTransportSettingsProvider; + +import javax.net.ssl.SSLException; + +import java.util.Locale; + +import io.netty.handler.ssl.ApplicationProtocolConfig; +import io.netty.handler.ssl.ApplicationProtocolNames; +import io.netty.handler.ssl.ClientAuth; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslProvider; +import io.netty.handler.ssl.SupportedCipherSuiteFilter; + +/** + * DefaultSslContextProvider is an implementation of the SslContextProvider interface that provides SSL contexts based on the provided SecureTransportSettingsProvider. + */ +public class DefaultSslContextProvider implements SslContextProvider { + + private final SecureTransportSettingsProvider secureTransportSettingsProvider; + + /** + * Constructor for DefaultSslContextProvider. + * @param secureTransportSettingsProvider The SecureTransportSettingsProvider instance. + */ + public DefaultSslContextProvider(SecureTransportSettingsProvider secureTransportSettingsProvider) { + this.secureTransportSettingsProvider = secureTransportSettingsProvider; + } + + // TODO - handle certificates reload + /** + * Creates and returns the server SSL context based on the provided SecureTransportSettingsProvider. + * @return The server SSL context. + */ + @Override + public SslContext getServerSslContext() { + try { + SecureTransportSettingsProvider.SecureTransportParameters parameters = secureTransportSettingsProvider.parameters(null).get(); + return SslContextBuilder.forServer(parameters.keyManagerFactory().get()) + .sslProvider(SslProvider.valueOf(parameters.sslProvider().get().toUpperCase(Locale.ROOT))) + .clientAuth(ClientAuth.valueOf(parameters.clientAuth().get().toUpperCase(Locale.ROOT))) + .protocols(parameters.protocols()) + .ciphers(parameters.cipherSuites(), SupportedCipherSuiteFilter.INSTANCE) + .sessionCacheSize(0) + .sessionTimeout(0) + .applicationProtocolConfig( + new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + // NO_ADVERTISE is currently the only mode supported by both OpenSsl and JDK providers. + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + // ACCEPT is currently the only mode supported by both OpenSsl and JDK providers. + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + ApplicationProtocolNames.HTTP_2, + ApplicationProtocolNames.HTTP_1_1 + ) + ) + .trustManager(parameters.trustManagerFactory().get()) + .build(); + } catch (SSLException e) { + throw new RuntimeException(e); + } + } + + /** + * Returns the client SSL context based on the provided SecureTransportSettingsProvider. + * @return The client SSL context. 
+ */ + @Override + public SslContext getClientSslContext() { + try { + SecureTransportSettingsProvider.SecureTransportParameters parameters = secureTransportSettingsProvider.parameters(null).get(); + return SslContextBuilder.forClient() + .sslProvider(SslProvider.valueOf(parameters.sslProvider().get().toUpperCase(Locale.ROOT))) + .protocols(parameters.protocols()) + .ciphers(parameters.cipherSuites(), SupportedCipherSuiteFilter.INSTANCE) + .applicationProtocolConfig( + new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + ApplicationProtocolNames.HTTP_2, + ApplicationProtocolNames.HTTP_1_1 + ) + ) + .sessionCacheSize(0) + .sessionTimeout(0) + .keyManager(parameters.keyManagerFactory().get()) + .trustManager(parameters.trustManagerFactory().get()) + .build(); + } catch (SSLException e) { + throw new RuntimeException(e); + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/SslContextProvider.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/SslContextProvider.java new file mode 100644 index 0000000000000..2cd38bc3c1dd5 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/SslContextProvider.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.bootstrap.tls; + +import io.netty.handler.ssl.SslContext; + +/** + * Provider interface for SSL/TLS context configuration in OpenSearch Flight. + * This interface defines methods for managing SSL contexts for both server and client-side + * Flight communications. + */ +public interface SslContextProvider { + + /** + * Gets the SSL context configuration for the Flight server. + * This context is used to secure incoming connections to the Flight server. + * + * @return SslContext configured for server-side TLS + */ + SslContext getServerSslContext(); + + /** + * Gets the SSL context configuration for Flight clients. + * This context is used when making outbound connections to other Flight servers. + * + * @return SslContext configured for client-side TLS + */ + SslContext getClientSslContext(); +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/package-info.java new file mode 100644 index 0000000000000..2ad8ae734c2da --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/tls/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * TLS/SSL configuration and security components for OpenSearch Flight service. 
+ */ +package org.opensearch.arrow.flight.bootstrap.tls; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/package-info.java new file mode 100644 index 0000000000000..2341a24d0be85 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Root package for OpenSearch Flight functionality, providing core flight service integration with OpenSearch. + */ +package org.opensearch.arrow.flight; diff --git a/plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy b/plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..803350a578009 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +grant codeBase "${codebase.netty-common}" { + permission java.net.SocketPermission "*", "accept,connect,listen,resolve"; + permission java.lang.RuntimePermission "*", "setContextClassLoader"; +}; + +grant codeBase "${codebase.grpc-core}" { + permission java.net.SocketPermission "*", "accept,connect,listen,resolve"; + permission java.lang.RuntimePermission "*", "setContextClassLoader"; +}; + +grant { + // arrow flight service permissions + permission java.util.PropertyPermission "arrow.allocation.manager.type", "write"; + permission java.util.PropertyPermission "arrow.enable_null_check_for_get", "write"; + permission java.util.PropertyPermission "arrow.enable_unsafe_memory_access", "write"; + permission java.util.PropertyPermission "arrow.memory.debug.allocator", "write"; + + permission java.util.PropertyPermission "io.netty.tryReflectionSetAccessible", "write"; + permission java.util.PropertyPermission "io.netty.allocator.numDirectArenas", "write"; + permission java.util.PropertyPermission "io.netty.noUnsafe", "write"; + permission java.util.PropertyPermission "io.netty.tryUnsafe", "write"; + + // Needed for netty based arrow flight server for netty configs related to buffer allocator + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission java.util.PropertyPermission "arrow.allocation.manager.type", "write"; + + permission java.lang.RuntimePermission "modifyThreadGroup"; + permission java.lang.RuntimePermission "modifyThread"; + permission java.net.SocketPermission "*", "accept,connect,listen,resolve"; + + // Reflection access needed by Arrow + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + + // Memory access + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; +}; diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java new file mode 100644 index 0000000000000..6f93d792f9db4 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java @@ 
-0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight; + +import org.opensearch.arrow.flight.api.FlightServerInfoAction; +import org.opensearch.arrow.flight.api.NodesFlightInfoAction; +import org.opensearch.arrow.flight.bootstrap.FlightService; +import org.opensearch.arrow.flight.bootstrap.FlightStreamPlugin; +import org.opensearch.arrow.spi.StreamManager; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.function.Supplier; + +import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS_SETTING; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class FlightStreamPluginTests extends OpenSearchTestCase { + private Settings settings; + private ClusterService clusterService; + + @Override + public void setUp() throws Exception { + super.setUp(); + settings = Settings.builder().put(ARROW_STREAMS_SETTING.getKey(), true).build(); + clusterService = mock(ClusterService.class); + ClusterState clusterState = mock(ClusterState.class); + DiscoveryNodes nodes = mock(DiscoveryNodes.class); + when(clusterService.state()).thenReturn(clusterState); + when(clusterState.nodes()).thenReturn(nodes); + when(nodes.getLocalNodeId()).thenReturn("test-node"); + } + + public void testPluginEnabled() throws IOException { + FeatureFlags.initializeFeatureFlags(settings); + FeatureFlagSetter.set(ARROW_STREAMS_SETTING.getKey()); + FlightStreamPlugin plugin = new FlightStreamPlugin(settings); + Collection components = plugin.createComponents( + null, + clusterService, + mock(ThreadPool.class), + null, + null, + null, + null, + null, + null, + null, + null + ); + + assertNotNull(components); + assertFalse(components.isEmpty()); + assertEquals(1, components.size()); + assertTrue(components.iterator().next() instanceof FlightService); + + List> executorBuilders = plugin.getExecutorBuilders(settings); + assertNotNull(executorBuilders); + assertFalse(executorBuilders.isEmpty()); + assertEquals(2, executorBuilders.size()); + + Supplier streamManager = plugin.getStreamManager(); + assertNotNull(streamManager); + + List> settings = plugin.getSettings(); + assertNotNull(settings); + assertFalse(settings.isEmpty()); + + assertNotNull(plugin.getSecureTransports(null, null, null, null, null, null, mock(SecureTransportSettingsProvider.class), null)); + + assertTrue( + plugin.getAuxTransports(null, null, null, new NetworkService(List.of()), null, null) + .get(AUX_TRANSPORT_TYPES_KEY) + .get() instanceof FlightService + ); + assertEquals(1, plugin.getRestHandlers(null, null, null, null, null, null, null).size()); + 
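// the single registered REST handler should be the Flight server info endpoint, paired with one transport action +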
assertTrue(plugin.getRestHandlers(null, null, null, null, null, null, null).get(0) instanceof FlightServerInfoAction); + assertEquals(1, plugin.getActions().size()); + assertEquals(NodesFlightInfoAction.INSTANCE.name(), plugin.getActions().get(0).getAction().name()); + + plugin.close(); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java new file mode 100644 index 0000000000000..6cb75d4a93dbe --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.SetOnce; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.test.rest.RestActionTestCase; +import org.junit.Before; + +import java.util.Collections; + +import static org.mockito.Mockito.mock; + +public class FlightServerInfoActionTests extends RestActionTestCase { + private FlightServerInfoAction handler; + + @Before + public void setUpAction() { + handler = new FlightServerInfoAction(); + controller().registerHandler(handler); + } + + public void testGetName() { + assertEquals("flight_server_info_action", handler.getName()); + } + + public void testRoutes() { + var routes = handler.routes(); + assertEquals(2, routes.size()); + assertTrue( + routes.stream().anyMatch(route -> route.getPath().equals("/_flight/info") && route.getMethod() == RestRequest.Method.GET) + ); + assertTrue( + routes.stream() + .anyMatch(route -> route.getPath().equals("/_flight/info/{nodeId}") && route.getMethod() == RestRequest.Method.GET) + ); + } + + public void testFlightInfoRequest() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) + .withPath("/_flight/info") + .build(); + SetOnce executeCalled = new SetOnce<>(); + verifyingClient.setExecuteVerifier((action, actionRequest) -> { + assertEquals(NodesFlightInfoAction.INSTANCE.name(), action.name()); + assertNotNull(actionRequest); + executeCalled.set(true); + return new NodesFlightInfoResponse( + new ClusterName("test-cluster"), + Collections.singletonList(new NodeFlightInfo(mock(DiscoveryNode.class), mock(BoundTransportAddress.class))), + Collections.emptyList() + ); + }); + dispatchRequest(request); + assertEquals(Boolean.TRUE, executeCalled.get()); + } + + public void testFlightInfoRequestWithNodeId() throws Exception { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) + .withPath("/_flight/info/local_node") + .build(); + SetOnce executeCalled = new SetOnce<>(); + verifyingClient.setExecuteVerifier((action, actionRequest) -> { + assertEquals(NodesFlightInfoAction.INSTANCE.name(), action.name()); + assertNotNull(actionRequest); + executeCalled.set(true); + return null; + }); + dispatchRequest(request); + assertEquals(Boolean.TRUE, executeCalled.get()); + } + + public void testFlightInfoRequestWithInvalidPath() throws Exception { + RestRequest 
request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) + .withPath("/_flight/invalid_path") + .build(); + SetOnce executeCalled = new SetOnce<>(); + verifyingClient.setExecuteVerifier((action, actionRequest) -> { + assertEquals(NodesFlightInfoAction.INSTANCE.name(), action.name()); + assertNotNull(actionRequest); + executeCalled.set(true); + return new NodesFlightInfoResponse( + new ClusterName("test-cluster"), + Collections.singletonList(new NodeFlightInfo(mock(DiscoveryNode.class), mock(BoundTransportAddress.class))), + Collections.emptyList() + ); + }); + dispatchRequest(request); + assertNull(executeCalled.get()); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java new file mode 100644 index 0000000000000..2f8d7deb06f3f --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.OpenSearchTestCase; + +import java.net.InetAddress; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +@SuppressWarnings("unchecked") +public class NodeFlightInfoTests extends OpenSearchTestCase { + + public void testNodeFlightInfoSerialization() throws Exception { + DiscoveryNode node = new DiscoveryNode( + "test_node", + "test_node", + "hostname", + "localhost", + "127.0.0.1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + new HashMap<>(), + new HashSet<>(), + Version.CURRENT + ); + + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + + NodeFlightInfo originalInfo = new NodeFlightInfo(node, boundAddress); + + BytesStreamOutput output = new BytesStreamOutput(); + originalInfo.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + NodeFlightInfo deserializedInfo = new NodeFlightInfo(input); + + assertEquals(originalInfo.getNode(), deserializedInfo.getNode()); + assertEquals(originalInfo.getBoundAddress().boundAddresses().length, deserializedInfo.getBoundAddress().boundAddresses().length); + assertEquals(originalInfo.getBoundAddress().boundAddresses()[0], deserializedInfo.getBoundAddress().boundAddresses()[0]); + assertEquals(originalInfo.getBoundAddress().publishAddress(), deserializedInfo.getBoundAddress().publishAddress()); + } + + public void testNodeFlightInfoEquality() throws Exception { + 
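// build two NodeFlightInfo instances from the same node and bound address; both should expose the same bound address +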
DiscoveryNode node = new DiscoveryNode( + "test_node", + "test_node", + "hostname", + "localhost", + "127.0.0.1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + new HashMap<>(), + new HashSet<>(), + Version.CURRENT + ); + + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + + NodeFlightInfo info1 = new NodeFlightInfo(node, boundAddress); + NodeFlightInfo info2 = new NodeFlightInfo(node, boundAddress); + + assertEquals(info1.getBoundAddress(), info2.getBoundAddress()); + } + + public void testGetters() throws Exception { + DiscoveryNode node = new DiscoveryNode( + "test_node", + "test_node", + "hostname", + "localhost", + "127.0.0.1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + new HashMap<>(), + new HashSet<>(), + Version.CURRENT + ); + + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + + NodeFlightInfo info = new NodeFlightInfo(node, boundAddress); + + assertEquals(node, info.getNode()); + assertEquals(boundAddress, info.getBoundAddress()); + } + + public void testToXContent() throws Exception { + TransportAddress boundAddress1 = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + TransportAddress boundAddress2 = new TransportAddress(InetAddress.getLoopbackAddress(), 47471); + TransportAddress publishAddress = new TransportAddress(InetAddress.getLoopbackAddress(), 47472); + + BoundTransportAddress boundAddress = new BoundTransportAddress( + new TransportAddress[] { boundAddress1, boundAddress2 }, + publishAddress + ); + + NodeFlightInfo info = new NodeFlightInfo( + new DiscoveryNode( + "test_node", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + Collections.emptyMap(), + Collections.emptySet(), + Version.CURRENT + ), + boundAddress + ); + + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + builder.field("node_info"); + info.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + Map responseMap = parser.map(); + + Map nodeInfo = (Map) responseMap.get("node_info"); + assertNotNull("node_info object should exist", nodeInfo); + + Map flightServer = (Map) nodeInfo.get("flight_server"); + assertNotNull("flight_server object should exist", flightServer); + + List> boundAddresses = (List>) flightServer.get("bound_addresses"); + assertNotNull("bound_addresses array should exist", boundAddresses); + assertEquals("Should have 2 bound addresses", 2, boundAddresses.size()); + + assertEquals("localhost", boundAddresses.get(0).get("host")); + assertEquals(47470, boundAddresses.get(0).get("port")); + + assertEquals("localhost", boundAddresses.get(1).get("host")); + assertEquals(47471, boundAddresses.get(1).get("port")); + + Map publishAddressMap = (Map) flightServer.get("publish_address"); + assertNotNull("publish_address object should exist", publishAddressMap); + assertEquals("localhost", publishAddressMap.get("host")); + assertEquals(47472, publishAddressMap.get("port")); + } + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java 
b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java new file mode 100644 index 0000000000000..756177423fe6f --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +public class NodesFlightInfoRequestTests extends OpenSearchTestCase { + + public void testNodesFlightInfoRequestSerialization() throws Exception { + NodesFlightInfoRequest originalRequest = new NodesFlightInfoRequest("node1", "node2"); + + BytesStreamOutput output = new BytesStreamOutput(); + originalRequest.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + NodesFlightInfoRequest deserializedRequest = new NodesFlightInfoRequest(input); + + assertArrayEquals(originalRequest.nodesIds(), deserializedRequest.nodesIds()); + } + + public void testNodesFlightInfoRequestConcreteNodes() { + String[] nodeIds = new String[] { "node1", "node2" }; + NodesFlightInfoRequest request = new NodesFlightInfoRequest(nodeIds); + assertArrayEquals(nodeIds, request.nodesIds()); + } + + public void testNodesFlightInfoRequestAllNodes() { + NodesFlightInfoRequest request = new NodesFlightInfoRequest(); + assertEquals(0, request.nodesIds().length); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java new file mode 100644 index 0000000000000..49a6cc6bacf40 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java @@ -0,0 +1,241 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.Version; +import org.opensearch.action.FailedNodeException; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.OpenSearchTestCase; + +import java.net.ConnectException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +@SuppressWarnings("unchecked") +public class NodesFlightInfoResponseTests extends OpenSearchTestCase { + + public void testNodesFlightInfoResponseSerialization() throws Exception { + ClusterName clusterName = new ClusterName("test-cluster"); + List nodes = new ArrayList<>(); + + DiscoveryNode node1 = createTestNode("node1"); + DiscoveryNode node2 = createTestNode("node2"); + + nodes.add(createNodeFlightInfo(node1, 47470)); + nodes.add(createNodeFlightInfo(node2, 47471)); + + NodesFlightInfoResponse originalResponse = new NodesFlightInfoResponse(clusterName, nodes, List.of()); + + BytesStreamOutput output = new BytesStreamOutput(); + originalResponse.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + NodesFlightInfoResponse deserializedResponse = new NodesFlightInfoResponse(input); + assertEquals(originalResponse.getNodes().size(), deserializedResponse.getNodes().size()); + + for (int i = 0; i < originalResponse.getNodes().size(); i++) { + NodeFlightInfo originalNode = originalResponse.getNodes().get(i); + NodeFlightInfo deserializedNode = deserializedResponse.getNodes().get(i); + + assertEquals(originalNode.getNode().getId(), deserializedNode.getNode().getId()); + assertEquals(originalNode.getNode().getName(), deserializedNode.getNode().getName()); + assertEquals(originalNode.getBoundAddress().publishAddress(), deserializedNode.getBoundAddress().publishAddress()); + } + assertEquals(originalResponse.getClusterName(), deserializedResponse.getClusterName()); + } + + public void testNodesFlightInfoResponseEmpty() { + ClusterName clusterName = new ClusterName("test-cluster"); + List nodes = new ArrayList<>(); + + NodesFlightInfoResponse response = new NodesFlightInfoResponse(clusterName, nodes, List.of()); + + assertTrue(response.getNodes().isEmpty()); + assertEquals(clusterName, response.getClusterName()); + } + + public void testToXContentWithFailures() throws Exception { + NodesFlightInfoResponse response = getNodesFlightInfoResponse(); + + XContentBuilder builder = JsonXContent.contentBuilder(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + Map responseMap = parser.map(); + + Map nodesStats = (Map) responseMap.get("_nodes"); + assertNotNull("_nodes object should exist", nodesStats); + assertEquals(2, nodesStats.get("total")); + assertEquals(2, nodesStats.get("successful")); + assertEquals(2, nodesStats.get("failed")); 
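+ // the remaining assertions cover the cluster name, each node's flight server publish address, and both failure entries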
+ + assertEquals("test-cluster", responseMap.get("cluster_name")); + + Map nodes = (Map) responseMap.get("nodes"); + assertNotNull("nodes object should exist", nodes); + assertEquals(2, nodes.size()); + + Map firstNode = (Map) nodes.get("successful_node_1"); + assertNotNull(firstNode); + Map firstNodeFlightServer = (Map) firstNode.get("flight_server"); + assertNotNull(firstNodeFlightServer); + Map firstNodePublishAddress = (Map) firstNodeFlightServer.get("publish_address"); + assertEquals("localhost", firstNodePublishAddress.get("host")); + assertEquals(47470, firstNodePublishAddress.get("port")); + + Map secondNode = (Map) nodes.get("successful_node_2"); + assertNotNull(secondNode); + Map secondNodeFlightServer = (Map) secondNode.get("flight_server"); + assertNotNull(secondNodeFlightServer); + Map secondNodePublishAddress = (Map) secondNodeFlightServer.get("publish_address"); + assertEquals("localhost", secondNodePublishAddress.get("host")); + assertEquals(47471, secondNodePublishAddress.get("port")); + + List> failuresList = (List>) responseMap.get("failures"); + assertNotNull("failures array should exist", failuresList); + assertEquals(2, failuresList.size()); + + Map firstFailure = failuresList.get(0); + assertEquals("failed_node_1", firstFailure.get("node_id")); + assertEquals("Connection refused", firstFailure.get("reason")); + + Map secondFailure = failuresList.get(1); + assertEquals("failed_node_2", secondFailure.get("node_id")); + assertEquals("Node not found", secondFailure.get("reason")); + } + } + + private static NodesFlightInfoResponse getNodesFlightInfoResponse() { + DiscoveryNode node1 = new DiscoveryNode( + "successful_node_1", + "successful_node_1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + Collections.emptyMap(), + Collections.emptySet(), + Version.CURRENT + ); + + List successfulNodes = getNodeFlightInfos(node1); + + return getNodesFlightInfoResponse(successfulNodes); + } + + private static NodesFlightInfoResponse getNodesFlightInfoResponse(List successfulNodes) { + List failures = Arrays.asList( + new FailedNodeException("failed_node_1", "Connection refused", new ConnectException("Connection refused")), + new FailedNodeException("failed_node_2", "Node not found", new Exception("Node not found")) + ); + + return new NodesFlightInfoResponse(new ClusterName("test-cluster"), successfulNodes, failures); + } + + private static List getNodeFlightInfos(DiscoveryNode node1) { + DiscoveryNode node2 = new DiscoveryNode( + "successful_node_2", + "successful_node_2", + new TransportAddress(InetAddress.getLoopbackAddress(), 9301), + Collections.emptyMap(), + Collections.emptySet(), + Version.CURRENT + ); + + TransportAddress address1 = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + return getNodeFlightInfos(node1, address1, node2); + } + + private static List getNodeFlightInfos(DiscoveryNode node1, TransportAddress address1, DiscoveryNode node2) { + BoundTransportAddress boundAddress1 = new BoundTransportAddress(new TransportAddress[] { address1 }, address1); + + TransportAddress address2 = new TransportAddress(InetAddress.getLoopbackAddress(), 47471); + BoundTransportAddress boundAddress2 = new BoundTransportAddress(new TransportAddress[] { address2 }, address2); + + return Arrays.asList(new NodeFlightInfo(node1, boundAddress1), new NodeFlightInfo(node2, boundAddress2)); + } + + public void testToXContentWithNoFailures() throws Exception { + NodesFlightInfoResponse response = getFlightInfoResponse(); + + XContentBuilder builder = 
JsonXContent.contentBuilder(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + Map responseMap = parser.map(); + + Map nodesStats = (Map) responseMap.get("_nodes"); + assertNotNull(nodesStats); + assertEquals(1, nodesStats.get("total")); + assertEquals(1, nodesStats.get("successful")); + assertEquals(0, nodesStats.get("failed")); + + assertEquals("test-cluster", responseMap.get("cluster_name")); + + Map nodes = (Map) responseMap.get("nodes"); + assertNotNull(nodes); + assertEquals(1, nodes.size()); + + assertNull("failures array should not exist", responseMap.get("failures")); + } + } + + private static NodesFlightInfoResponse getFlightInfoResponse() { + DiscoveryNode node = new DiscoveryNode( + "successful_node", + "successful_node", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + Collections.emptyMap(), + Collections.emptySet(), + Version.CURRENT + ); + + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + + return new NodesFlightInfoResponse( + new ClusterName("test-cluster"), + Collections.singletonList(new NodeFlightInfo(node, boundAddress)), + Collections.emptyList() + ); + } + + private DiscoveryNode createTestNode(String nodeId) { + return new DiscoveryNode( + nodeId, + nodeId, + "host" + nodeId, + "localhost", + "127.0.0.1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + new HashMap<>(), + new HashSet<>(), + Version.CURRENT + ); + } + + private NodeFlightInfo createNodeFlightInfo(DiscoveryNode node, int port) { + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), port); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + return new NodeFlightInfo(node, boundAddress); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java new file mode 100644 index 0000000000000..d9d8af5920d61 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java @@ -0,0 +1,176 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.api; + +import org.opensearch.Version; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.arrow.flight.bootstrap.FlightService; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportNodesFlightInfoActionTests extends OpenSearchTestCase { + + private DiscoveryNode localNode; + private TransportNodesFlightInfoAction action; + private BoundTransportAddress boundAddress; + + @Before + public void setUp() throws Exception { + super.setUp(); + + localNode = new DiscoveryNode( + "local_node", + "local_node", + "host", + "localhost", + "127.0.0.1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + new HashMap<>(), + new HashSet<>(), + Version.CURRENT + ); + + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterName()).thenReturn(new ClusterName("test-cluster")); + when(clusterService.localNode()).thenReturn(localNode); + + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 47470); + boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + + FlightService flightService = mock(FlightService.class); + when(flightService.getBoundAddress()).thenReturn(boundAddress); + + action = new TransportNodesFlightInfoAction( + Settings.EMPTY, + mock(ThreadPool.class), + clusterService, + mock(TransportService.class), + new ActionFilters(Collections.emptySet()), + flightService + ); + } + + public void testNewResponse() { + NodesFlightInfoRequest request = new NodesFlightInfoRequest(); + List nodeFlightInfos = Collections.singletonList(new NodeFlightInfo(localNode, boundAddress)); + List failures = Collections.emptyList(); + + NodesFlightInfoResponse response = action.newResponse(request, nodeFlightInfos, failures); + + assertNotNull(response); + assertEquals("test-cluster", response.getClusterName().value()); + assertEquals(1, response.getNodes().size()); + assertEquals(0, response.failures().size()); + + NodeFlightInfo nodeInfo = response.getNodes().get(0); + assertEquals(localNode, nodeInfo.getNode()); + assertEquals(boundAddress, nodeInfo.getBoundAddress()); + } + + public void testNewResponseWithFailures() { + NodesFlightInfoRequest request = new NodesFlightInfoRequest(); + List nodeFlightInfos = Collections.emptyList(); + List failures = Collections.singletonList(new FailedNodeException("failed_node", "test failure", null)); + + NodesFlightInfoResponse response = action.newResponse(request, nodeFlightInfos, failures); + + assertNotNull(response); + assertEquals("test-cluster", response.getClusterName().value()); + assertEquals(0, 
response.getNodes().size()); + assertEquals(1, response.failures().size()); + assertEquals("failed_node", response.failures().get(0).nodeId()); + assertEquals("test failure", response.failures().get(0).getMessage()); + } + + public void testNewNodeRequest() { + NodesFlightInfoRequest request = new NodesFlightInfoRequest("node1", "node2"); + NodesFlightInfoRequest.NodeFlightInfoRequest nodeRequest = action.newNodeRequest(request); + + assertNotNull(nodeRequest); + assertArrayEquals(new String[] { "node1", "node2" }, nodeRequest.request.nodesIds()); + } + + public void testNewNodeResponse() throws IOException { + NodeFlightInfo nodeInfo = new NodeFlightInfo(localNode, boundAddress); + BytesStreamOutput out = new BytesStreamOutput(); + nodeInfo.writeTo(out); + StreamInput in = out.bytes().streamInput(); + + NodeFlightInfo deserializedInfo = action.newNodeResponse(in); + + assertNotNull(deserializedInfo); + assertEquals(nodeInfo.getNode(), deserializedInfo.getNode()); + assertEquals(nodeInfo.getBoundAddress().publishAddress(), deserializedInfo.getBoundAddress().publishAddress()); + } + + public void testNodeOperation() { + NodesFlightInfoRequest.NodeFlightInfoRequest nodeRequest = new NodesFlightInfoRequest.NodeFlightInfoRequest( + new NodesFlightInfoRequest() + ); + + NodeFlightInfo response = action.nodeOperation(nodeRequest); + + assertNotNull(response); + assertEquals(localNode, response.getNode()); + assertEquals(boundAddress.publishAddress(), response.getBoundAddress().publishAddress()); + } + + public void testNodeOperationWithSpecificNodes() throws IOException { + NodesFlightInfoRequest request = new NodesFlightInfoRequest("local_node"); + NodesFlightInfoRequest.NodeFlightInfoRequest nodeRequest = new NodesFlightInfoRequest.NodeFlightInfoRequest(request); + + NodeFlightInfo response = action.nodeOperation(nodeRequest); + + assertNotNull(response); + assertEquals(localNode, response.getNode()); + assertEquals(boundAddress, response.getBoundAddress()); + } + + public void testNodeOperationWithInvalidNode() throws IOException { + NodesFlightInfoRequest request = new NodesFlightInfoRequest("invalid_node"); + NodesFlightInfoRequest.NodeFlightInfoRequest nodeRequest = new NodesFlightInfoRequest.NodeFlightInfoRequest(request); + + NodeFlightInfo response = action.nodeOperation(nodeRequest); + + assertNotNull(response); + assertEquals(localNode, response.getNode()); + assertEquals(boundAddress, response.getBoundAddress()); + } + + public void testSerialization() throws IOException { + NodesFlightInfoRequest request = new NodesFlightInfoRequest("node1", "node2"); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput in = out.bytes().streamInput(); + NodesFlightInfoRequest deserializedRequest = new NodesFlightInfoRequest(in); + + assertArrayEquals(request.nodesIds(), deserializedRequest.nodesIds()); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java new file mode 100644 index 0000000000000..acc32d6b32f4c --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java @@ -0,0 +1,384 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.arrow.flight.bootstrap; + +import org.apache.arrow.flight.FlightClient; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.opensearch.Version; +import org.opensearch.arrow.flight.api.NodeFlightInfo; +import org.opensearch.arrow.flight.api.NodesFlightInfoAction; +import org.opensearch.arrow.flight.api.NodesFlightInfoRequest; +import org.opensearch.arrow.flight.api.NodesFlightInfoResponse; +import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.client.Client; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import io.netty.channel.EventLoopGroup; +import io.netty.util.NettyRuntime; + +import static org.opensearch.arrow.flight.bootstrap.FlightClientManager.LOCATION_TIMEOUT_MS; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.when; + +@SuppressWarnings("unchecked") +public class FlightClientManagerTests extends OpenSearchTestCase { + + private static BufferAllocator allocator; + private static EventLoopGroup elg; + private static ExecutorService executorService; + private static final AtomicInteger port = new AtomicInteger(0); + + private ClusterService clusterService; + private Client client; + private ClusterState state; + private FlightClientManager clientManager; + private ScheduledExecutorService locationUpdaterExecutor; + + @BeforeClass + public static void setupClass() throws Exception { + ServerConfig.init(Settings.EMPTY); + allocator = new RootAllocator(); + elg = ServerConfig.createELG("test-grpc-worker-elg", NettyRuntime.availableProcessors() * 2); + executorService = ServerConfig.createELG("test-grpc-worker", NettyRuntime.availableProcessors() * 2); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + locationUpdaterExecutor = Executors.newScheduledThreadPool(1); + + FeatureFlagSetter.set(FeatureFlags.ARROW_STREAMS_SETTING.getKey()); + clusterService = mock(ClusterService.class); + client = mock(Client.class); + state = 
getDefaultState(); + when(clusterService.state()).thenReturn(state); + + mockFlightInfoResponse(state.nodes(), 0); + + SslContextProvider sslContextProvider = null; + + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.executor(ServerConfig.FLIGHT_CLIENT_THREAD_POOL_NAME)).thenReturn(executorService); + clientManager = new FlightClientManager(allocator, clusterService, sslContextProvider, elg, threadPool, client); + ClusterChangedEvent event = new ClusterChangedEvent("test", state, ClusterState.EMPTY_STATE); + clientManager.clusterChanged(event); + assertBusy(() -> { + assertEquals("Flight client isn't built in time limit", 2, clientManager.getFlightClients().size()); + assertNotNull("local_node should exist", clientManager.getFlightClient("local_node").get()); + assertNotNull("remote_node should exist", clientManager.getFlightClient("remote_node").get()); + }, 2, TimeUnit.SECONDS); + } + + private void mockFlightInfoResponse(DiscoveryNodes nodes, int sleepDuration) { + doAnswer(invocation -> { + locationUpdaterExecutor.schedule(() -> { + try { + NodesFlightInfoRequest request = invocation.getArgument(1); + ActionListener listener = invocation.getArgument(2); + + List nodeInfos = new ArrayList<>(); + for (DiscoveryNode node : nodes) { + if (request.nodesIds().length == 0 || Arrays.asList(request.nodesIds()).contains(node.getId())) { + int flightPort = getBaseStreamPort() + port.addAndGet(2); + TransportAddress address = new TransportAddress( + InetAddress.getByName(node.getAddress().getAddress()), + flightPort + ); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + NodeFlightInfo nodeInfo = new NodeFlightInfo(node, boundAddress); + nodeInfos.add(nodeInfo); + } + } + NodesFlightInfoResponse response = new NodesFlightInfoResponse(ClusterName.DEFAULT, nodeInfos, Collections.emptyList()); + listener.onResponse(response); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + }, sleepDuration, TimeUnit.MILLISECONDS); + return null; + }).when(client).execute(eq(NodesFlightInfoAction.INSTANCE), any(NodesFlightInfoRequest.class), any(ActionListener.class)); + + } + + @Override + public void tearDown() throws Exception { + locationUpdaterExecutor.shutdown(); + super.tearDown(); + clientManager.close(); + } + + private ClusterState getDefaultState() throws Exception { + int testPort = getBasePort() + port.addAndGet(2); + + DiscoveryNode localNode = createNode("local_node", "127.0.0.1", testPort); + DiscoveryNode remoteNode = createNode("remote_node", "127.0.0.2", testPort + 1); + + // Setup initial cluster state + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(remoteNode); + nodesBuilder.add(localNode); + nodesBuilder.localNodeId(localNode.getId()); + DiscoveryNodes nodes = nodesBuilder.build(); + + return ClusterState.builder(new ClusterName("test")).nodes(nodes).build(); + } + + private DiscoveryNode createNode(String nodeId, String host, int port) throws Exception { + TransportAddress address = new TransportAddress(InetAddress.getByName(host), port); + Map attributes = new HashMap<>(); + attributes.put("arrow.streams.enabled", "true"); + Set roles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE); + return new DiscoveryNode(nodeId, address, attributes, roles, Version.CURRENT); + } + + @AfterClass + public static void tearClass() { + allocator.close(); + } + + public void testGetFlightClientForExistingNode() { + validateNodes(); + } + + public void 
testGetFlightClientForNonExistentNode() throws Exception { + assertFalse(clientManager.getFlightClient("non_existent_node").isPresent()); + } + + public void testClusterChangedWithNodesChanged() throws Exception { + DiscoveryNode newNode = createNode("new_node", "127.0.0.3", getBasePort() + port.addAndGet(1)); + DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(); + + for (DiscoveryNode node : state.nodes()) { + newNodesBuilder.add(node); + } + newNodesBuilder.localNodeId("local_node"); + // Update cluster state with new node + newNodesBuilder.add(newNode); + DiscoveryNodes newNodes = newNodesBuilder.build(); + + ClusterState newState = ClusterState.builder(new ClusterName("test")).nodes(newNodes).build(); + mockFlightInfoResponse(newNodes, 0); + when(clusterService.state()).thenReturn(newState); + clientManager.clusterChanged(new ClusterChangedEvent("test", newState, state)); + + for (DiscoveryNode node : newState.nodes()) { + assertBusy( + () -> { assertTrue("Flight client isn't built in time limit", clientManager.getFlightClient(node.getId()).isPresent()); }, + 2, + TimeUnit.SECONDS + ); + } + } + + public void testClusterChangedWithNoNodesChanged() throws Exception { + ClusterChangedEvent event = new ClusterChangedEvent("test", state, state); + clientManager.clusterChanged(event); + + // Verify original client still exists + for (DiscoveryNode node : state.nodes()) { + assertNotNull(clientManager.getFlightClient(node.getId()).get()); + } + } + + public void testGetLocalNodeId() throws Exception { + assertEquals("Local node ID should match", "local_node", clientManager.getLocalNodeId()); + } + + public void testCloseWithActiveClients() throws Exception { + for (DiscoveryNode node : state.nodes()) { + FlightClient client = clientManager.getFlightClient(node.getId()).get(); + assertNotNull(client); + } + + clientManager.close(); + assertEquals(0, clientManager.getFlightClients().size()); + } + + public void testIncompatibleNodeVersion() throws Exception { + Map attributes = new HashMap<>(); + attributes.put("arrow.streams.enabled", "true"); + DiscoveryNode oldVersionNode = new DiscoveryNode( + "old_version_node", + new TransportAddress(InetAddress.getByName("127.0.0.3"), getBasePort() + port.addAndGet(1)), + attributes, + Collections.singleton(DiscoveryNodeRole.DATA_ROLE), + Version.fromString("2.18.0") // Version before Arrow Flight introduction + ); + + // Update cluster state with old version node + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(oldVersionNode); + nodesBuilder.localNodeId("local_node"); + DiscoveryNodes nodes = nodesBuilder.build(); + ClusterState oldVersionState = ClusterState.builder(new ClusterName("test")).nodes(nodes).build(); + + when(clusterService.state()).thenReturn(oldVersionState); + mockFlightInfoResponse(nodes, 0); + + assertFalse(clientManager.getFlightClient(oldVersionNode.getId()).isPresent()); + } + + public void testGetFlightClientLocationTimeout() throws Exception { + reset(client); + + String nodeId = "test_node"; + DiscoveryNode testNode = createNode(nodeId, "127.0.0.1", getBasePort() + port.addAndGet(2)); + + // Update cluster state with the test node + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(testNode); + nodesBuilder.localNodeId(nodeId); + ClusterState newState = ClusterState.builder(new ClusterName("test")).nodes(nodesBuilder.build()).build(); + when(clusterService.state()).thenReturn(newState); + // Mock a delayed response that will cause timeout + 
mockFlightInfoResponse(newState.nodes(), LOCATION_TIMEOUT_MS + 100); + + ClusterChangedEvent event = new ClusterChangedEvent("test", newState, ClusterState.EMPTY_STATE); + clientManager.clusterChanged(event); + assertFalse(clientManager.getFlightClient(nodeId).isPresent()); + } + + public void testGetFlightClientLocationExecutionError() throws Exception { + reset(client); + + String nodeId = "test_node"; + DiscoveryNode testNode = createNode(nodeId, "127.0.0.1", getBasePort() + port.addAndGet(2)); + + // Update cluster state with the test node + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(testNode); + nodesBuilder.localNodeId(nodeId); + ClusterState newState = ClusterState.builder(new ClusterName("test")).nodes(nodesBuilder.build()).build(); + + when(clusterService.state()).thenReturn(newState); + + // Mock failure + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(2); + listener.onFailure(new RuntimeException("Test execution error")); + return null; + }).when(client).execute(eq(NodesFlightInfoAction.INSTANCE), any(NodesFlightInfoRequest.class), any(ActionListener.class)); + + ClusterChangedEvent event = new ClusterChangedEvent("test", newState, ClusterState.EMPTY_STATE); + clientManager.clusterChanged(event); + + assertFalse(clientManager.getFlightClient(nodeId).isPresent()); + } + + public void testFailedClusterUpdateButSuccessfulDirectRequest() throws Exception { + reset(client); + + String nodeId = "test_node"; + DiscoveryNode testNode = createNode(nodeId, "127.0.0.1", getBasePort() + port.addAndGet(2)); + + // Update cluster state with the test node + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(testNode); + nodesBuilder.localNodeId(nodeId); + ClusterState newState = ClusterState.builder(new ClusterName("test")).nodes(nodesBuilder.build()).build(); + + when(clusterService.state()).thenReturn(newState); + + // First mock call fails during cluster update + AtomicBoolean firstCall = new AtomicBoolean(true); + doAnswer(invocation -> { + locationUpdaterExecutor.schedule(() -> { + ActionListener listener = invocation.getArgument(2); + if (firstCall.getAndSet(false)) { + // Fail on first call (during cluster update) + listener.onFailure(new RuntimeException("Failed during cluster update")); + } else { + // Succeed on second call (direct request) + try { + NodesFlightInfoRequest request = invocation.getArgument(1); + List nodeInfos = new ArrayList<>(); + for (DiscoveryNode node : newState.nodes()) { + if (request.nodesIds().length == 0 || Arrays.asList(request.nodesIds()).contains(node.getId())) { + int flightPort = getBaseStreamPort() + port.addAndGet(2); + TransportAddress address = new TransportAddress( + InetAddress.getByName(node.getAddress().getAddress()), + flightPort + ); + BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { address }, address); + NodeFlightInfo nodeInfo = new NodeFlightInfo(node, boundAddress); + nodeInfos.add(nodeInfo); + } + } + NodesFlightInfoResponse response = new NodesFlightInfoResponse( + ClusterName.DEFAULT, + nodeInfos, + Collections.emptyList() + ); + listener.onResponse(response); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } + }, 0, TimeUnit.MICROSECONDS); + return null; + }).when(client).execute(eq(NodesFlightInfoAction.INSTANCE), any(NodesFlightInfoRequest.class), any(ActionListener.class)); + + ClusterChangedEvent event = new ClusterChangedEvent("test", newState, 
ClusterState.EMPTY_STATE); + clientManager.clusterChanged(event); + + // Verify that the client can still be created successfully on direct request + clientManager.buildClientAsync(nodeId); + assertBusy(() -> { + assertTrue("Flight client should be created successfully on direct request", clientManager.getFlightClient(nodeId).isPresent()); + }, 2, TimeUnit.SECONDS); + assertFalse("first call should be invoked", firstCall.get()); + } + + private void validateNodes() { + for (DiscoveryNode node : state.nodes()) { + FlightClient client = clientManager.getFlightClient(node.getId()).get(); + assertNotNull("Flight client should be created for existing node", client); + } + } + + protected static int getBaseStreamPort() { + return getBasePort(9401); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java new file mode 100644 index 0000000000000..fa20535384557 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.arrow.flight.bootstrap; + +import org.opensearch.Version; +import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.client.Client; + +import java.net.InetAddress; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class FlightServiceTests extends OpenSearchTestCase { + + private Settings settings; + private ClusterService clusterService; + private NetworkService networkService; + private ThreadPool threadPool; + private final AtomicInteger port = new AtomicInteger(0); + private DiscoveryNode localNode; + + @Override + public void setUp() throws Exception { + super.setUp(); + FeatureFlagSetter.set(FeatureFlags.ARROW_STREAMS_SETTING.getKey()); + int availablePort = getBasePort(9500) + port.addAndGet(1); + settings = Settings.EMPTY; + localNode = createNode(availablePort); + + // Setup initial cluster state + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.localNodeId(localNode.getId()); + nodesBuilder.add(localNode); + DiscoveryNodes nodes = nodesBuilder.build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(nodes).build(); + clusterService = mock(ClusterService.class); + 
when(clusterService.state()).thenReturn(clusterState); + + threadPool = mock(ThreadPool.class); + when(threadPool.executor(ServerConfig.FLIGHT_SERVER_THREAD_POOL_NAME)).thenReturn(mock(ExecutorService.class)); + when(threadPool.executor(ServerConfig.FLIGHT_CLIENT_THREAD_POOL_NAME)).thenReturn(mock(ExecutorService.class)); + networkService = new NetworkService(Collections.emptyList()); + } + + public void testInitializeWithSslDisabled() throws Exception { + + Settings noSslSettings = Settings.builder().put("arrow.ssl.enable", false).build(); + + try (FlightService noSslService = new FlightService(noSslSettings)) { + noSslService.setClusterService(clusterService); + noSslService.setThreadPool(threadPool); + noSslService.setClient(mock(Client.class)); + noSslService.setNetworkService(networkService); + noSslService.start(); + SslContextProvider sslContextProvider = noSslService.getSslContextProvider(); + assertNull("SSL context provider should be null", sslContextProvider); + assertNotNull(noSslService.getFlightClientManager()); + assertNotNull(noSslService.getBoundAddress()); + } + } + + public void testStartAndStop() throws Exception { + try (FlightService testService = new FlightService(Settings.EMPTY)) { + testService.setClusterService(clusterService); + testService.setThreadPool(threadPool); + testService.setClient(mock(Client.class)); + testService.setNetworkService(networkService); + testService.start(); + testService.stop(); + testService.start(); + assertNull(testService.getStreamManager()); + } + } + + public void testInitializeWithoutSecureTransportSettingsProvider() { + Settings sslSettings = Settings.builder().put(settings).put("arrow.ssl.enable", true).build(); + + try (FlightService sslService = new FlightService(sslSettings)) { + // Should throw exception when initializing without provider + expectThrows(RuntimeException.class, () -> { + sslService.setClusterService(clusterService); + sslService.setThreadPool(threadPool); + sslService.setClient(mock(Client.class)); + sslService.setNetworkService(networkService); + sslService.start(); + }); + } + } + + public void testServerStartupFailure() { + Settings invalidSettings = Settings.builder() + .put(ServerComponents.SETTING_FLIGHT_PUBLISH_PORT.getKey(), "-100") // Invalid port + .build(); + try (FlightService invalidService = new FlightService(invalidSettings)) { + invalidService.setClusterService(clusterService); + invalidService.setThreadPool(threadPool); + invalidService.setClient(mock(Client.class)); + invalidService.setNetworkService(networkService); + expectThrows(RuntimeException.class, () -> { invalidService.doStart(); }); + } + } + + public void testLifecycleStateTransitions() throws Exception { + // Find new port for this test + try (FlightService testService = new FlightService(Settings.EMPTY)) { + testService.setClusterService(clusterService); + testService.setThreadPool(threadPool); + testService.setClient(mock(Client.class)); + testService.setNetworkService(networkService); + // Test all state transitions + testService.start(); + assertEquals("STARTED", testService.lifecycleState().toString()); + + testService.stop(); + assertEquals("STOPPED", testService.lifecycleState().toString()); + + testService.close(); + assertEquals("CLOSED", testService.lifecycleState().toString()); + } + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + private DiscoveryNode createNode(int port) throws Exception { + TransportAddress address = new TransportAddress(InetAddress.getByName("127.0.0.1"), 
port); + Map attributes = new HashMap<>(); + attributes.put("arrow.streams.enabled", "true"); + + Set roles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE); + return new DiscoveryNode("local_node", address, attributes, roles, Version.CURRENT); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/ServerConfigTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/ServerConfigTests.java new file mode 100644 index 0000000000000..9419e26318046 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/ServerConfigTests.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.arrow.flight.bootstrap; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ScalingExecutorBuilder; + +import static org.opensearch.arrow.flight.bootstrap.ServerComponents.SETTING_FLIGHT_PUBLISH_PORT; + +public class ServerConfigTests extends OpenSearchTestCase { + + private Settings settings; + + @Override + public void setUp() throws Exception { + super.setUp(); + settings = Settings.builder() + .put("arrow.allocation.manager.type", "Netty") + .put("arrow.enable_null_check_for_get", false) + .put("arrow.enable_unsafe_memory_access", true) + .put("arrow.memory.debug.allocator", false) + .put("arrow.ssl.enable", true) + .put("thread_pool.flight-server.min", 1) + .put("thread_pool.flight-server.max", 4) + .put("thread_pool.flight-server.keep_alive", TimeValue.timeValueMinutes(5)) + .build(); + } + + public void testInit() { + ServerConfig.init(settings); + + // Verify system properties are set correctly + assertEquals("Netty", System.getProperty("arrow.allocation.manager.type")); + assertEquals("false", System.getProperty("arrow.enable_null_check_for_get")); + assertEquals("true", System.getProperty("arrow.enable_unsafe_memory_access")); + assertEquals("false", System.getProperty("arrow.memory.debug.allocator")); + + // Verify SSL settings + assertTrue(ServerConfig.isSslEnabled()); + + ScalingExecutorBuilder executorBuilder = ServerConfig.getServerExecutorBuilder(); + assertNotNull(executorBuilder); + assertEquals(3, executorBuilder.getRegisteredSettings().size()); + assertEquals(1, executorBuilder.getRegisteredSettings().get(0).get(settings)); // min + assertEquals(4, executorBuilder.getRegisteredSettings().get(1).get(settings)); // max + assertEquals(TimeValue.timeValueMinutes(5), executorBuilder.getRegisteredSettings().get(2).get(settings)); // keep alive + } + + public void testGetSettings() { + var settings = ServerConfig.getSettings(); + assertNotNull(settings); + assertFalse(settings.isEmpty()); + + assertTrue(settings.contains(ServerConfig.ARROW_ALLOCATION_MANAGER_TYPE)); + assertTrue(settings.contains(ServerConfig.ARROW_ENABLE_NULL_CHECK_FOR_GET)); + assertTrue(settings.contains(ServerConfig.ARROW_ENABLE_UNSAFE_MEMORY_ACCESS)); + assertTrue(settings.contains(ServerConfig.ARROW_ENABLE_DEBUG_ALLOCATOR)); + assertTrue(settings.contains(ServerConfig.ARROW_SSL_ENABLE)); + } + + public void testDefaultSettings() { + Settings defaultSettings = Settings.EMPTY; + ServerConfig.init(defaultSettings); + + // Verify default values + assertEquals(-1, 
SETTING_FLIGHT_PUBLISH_PORT.get(defaultSettings).intValue()); + assertEquals("Netty", ServerConfig.ARROW_ALLOCATION_MANAGER_TYPE.get(defaultSettings)); + assertFalse(ServerConfig.ARROW_ENABLE_NULL_CHECK_FOR_GET.get(defaultSettings)); + assertTrue(ServerConfig.ARROW_ENABLE_UNSAFE_MEMORY_ACCESS.get(defaultSettings)); + assertFalse(ServerConfig.ARROW_ENABLE_DEBUG_ALLOCATOR.get(defaultSettings)); + assertFalse(ServerConfig.ARROW_SSL_ENABLE.get(defaultSettings)); + } +} diff --git a/server/build.gradle b/server/build.gradle index e1512fb4b2c58..cb64d6becb315 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -69,6 +69,7 @@ dependencies { api project(":libs:opensearch-geo") api project(":libs:opensearch-telemetry") api project(":libs:opensearch-task-commons") + implementation project(':libs:opensearch-arrow-spi') compileOnly project(':libs:opensearch-plugin-classloader') testRuntimeOnly project(':libs:opensearch-plugin-classloader') diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 59d999798868e..6753bb8eac083 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -39,6 +39,7 @@ protected FeatureFlagSettings( FeatureFlags.STAR_TREE_INDEX_SETTING, FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, - FeatureFlags.TERM_VERSION_PRECOMMIT_ENABLE_SETTING + FeatureFlags.TERM_VERSION_PRECOMMIT_ENABLE_SETTING, + FeatureFlags.ARROW_STREAMS_SETTING ); } diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 6df68013a8119..4be45aed70023 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -128,6 +128,9 @@ public class FeatureFlags { Property.NodeScope ); + public static final String ARROW_STREAMS = "opensearch.experimental.feature.arrow.streams.enabled"; + public static final Setting ARROW_STREAMS_SETTING = Setting.boolSetting(ARROW_STREAMS, false, Property.NodeScope); + private static final List> ALL_FEATURE_FLAG_SETTINGS = List.of( REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, EXTENSIONS_SETTING, @@ -138,7 +141,8 @@ public class FeatureFlags { STAR_TREE_INDEX_SETTING, APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, - TERM_VERSION_PRECOMMIT_ENABLE_SETTING + TERM_VERSION_PRECOMMIT_ENABLE_SETTING, + ARROW_STREAMS_SETTING ); /** diff --git a/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java b/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java index e3771f224a7db..3265c582dba76 100644 --- a/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java +++ b/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java @@ -11,6 +11,13 @@ import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.TrustManagerFactory; + +import java.util.Collection; +import java.util.List; +import java.util.Optional; + /** * Default implementation of {@link SecureTransportSettingsProvider.SecureTransportParameters}. 
*/ @@ -25,4 +32,34 @@ class DefaultSecureTransportParameters implements SecureTransportSettingsProvide public boolean dualModeEnabled() { return NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED.get(settings); } + + @Override + public Optional keyManagerFactory() { + return Optional.empty(); + } + + @Override + public Optional sslProvider() { + return Optional.empty(); + } + + @Override + public Optional clientAuth() { + return Optional.empty(); + } + + @Override + public Collection protocols() { + return List.of(); + } + + @Override + public Collection cipherSuites() { + return List.of(); + } + + @Override + public Optional trustManagerFactory() { + return Optional.empty(); + } } diff --git a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java index 5f9e1a952b6e8..f4cf64c16cbd2 100644 --- a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java +++ b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java @@ -13,8 +13,10 @@ import org.opensearch.transport.Transport; import org.opensearch.transport.TransportAdapterProvider; +import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; import java.util.Collection; import java.util.Collections; @@ -52,6 +54,18 @@ default Optional parameters(Settings settings) { @ExperimentalApi interface SecureTransportParameters { boolean dualModeEnabled(); + + Optional keyManagerFactory(); + + Optional sslProvider(); + + Optional clientAuth(); + + Collection protocols(); + + Collection cipherSuites(); + + Optional trustManagerFactory(); } /** diff --git a/server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java b/server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java new file mode 100644 index 0000000000000..60bdb789b3750 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.arrow.spi.StreamManager; + +import java.util.function.Supplier; + +/** + * An interface for OpenSearch plugins to implement to provide a StreamManager. + * Plugins can implement this interface to provide custom StreamManager implementation. + * @see StreamManager + */ +public interface StreamManagerPlugin { + /** + * Returns the StreamManager instance for this plugin. + * + * @return The StreamManager instance + */ + Supplier getStreamManager(); +} diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index 052b1a4e52eb9..0bd5d8afda91e 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -1768,7 +1768,7 @@ public static String getPortRange() { return getBasePort() + "-" + (getBasePort() + 99); // upper bound is inclusive } - protected static int getBasePort() { + protected static int getBasePort(int start) { // some tests use MockTransportService to do network based testing. 
Yet, we run tests in multiple JVMs that means // concurrent tests could claim port that another JVM just released and if that test tries to simulate a disconnect it might // be smart enough to re-connect depending on what is tested. To reduce the risk, since this is very hard to debug we use @@ -1792,7 +1792,11 @@ protected static int getBasePort() { startAt = (int) Math.floorMod(workerId - 1, 223L) + 1; } assert startAt >= 0 : "Unexpected test worker Id, resulting port range would be negative"; - return 10300 + (startAt * 100); + return start + (startAt * 100); + } + + protected static int getBasePort() { + return getBasePort(10300); } protected static InetAddress randomIp(boolean v4) { From ca8e4f871a1cfec70985aa6acea33f0c36484be5 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 21 Feb 2025 07:42:30 -0500 Subject: [PATCH 018/550] HTTP API calls hang with 'Accept-Encoding: zstd' (#17408) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../netty4/Netty4HttpServerTransport.java | 75 ++++++++++++++++++- .../Netty4HttpServerTransportTests.java | 5 +- 3 files changed, 77 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab4138c452894..1ad53194361fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Add highlighting for wildcard search on `match_only_text` field ([#17101](https://github.com/opensearch-project/OpenSearch/pull/17101)) - Fix illegal argument exception when creating a PIT ([#16781](https://github.com/opensearch-project/OpenSearch/pull/16781)) +- Fix HTTP API calls that hang with 'Accept-Encoding: zstd' ([#17408](https://github.com/opensearch-project/OpenSearch/pull/17408)) ### Security diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 4970c42163ac3..7e2f3496e5c01 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -61,6 +61,8 @@ import java.net.InetSocketAddress; import java.net.SocketOption; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; import io.netty.bootstrap.ServerBootstrap; @@ -77,6 +79,12 @@ import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.socket.nio.NioChannelOption; import io.netty.handler.codec.ByteToMessageDecoder; +import io.netty.handler.codec.compression.Brotli; +import io.netty.handler.codec.compression.CompressionOptions; +import io.netty.handler.codec.compression.DeflateOptions; +import io.netty.handler.codec.compression.GzipOptions; +import io.netty.handler.codec.compression.StandardCompressionOptions; +import io.netty.handler.codec.compression.ZstdEncoder; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpMessage; @@ -440,7 +448,7 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws E pipeline.addAfter( "aggregator", "encoder_compress", - new HttpContentCompressor(handlingSettings.getCompressionLevel()) + new 
HttpContentCompressor(defaultCompressionOptions(handlingSettings.getCompressionLevel())) ); } pipeline.addBefore("handler", "request_creator", requestCreator); @@ -467,7 +475,10 @@ protected void configureDefaultHttpPipeline(ChannelPipeline pipeline) { aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); pipeline.addLast("aggregator", aggregator); if (handlingSettings.isCompression()) { - pipeline.addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + pipeline.addLast( + "encoder_compress", + new HttpContentCompressor(defaultCompressionOptions(handlingSettings.getCompressionLevel())) + ); } pipeline.addLast("request_creator", requestCreator); pipeline.addLast("response_creator", responseCreator); @@ -512,7 +523,10 @@ protected void initChannel(Channel childChannel) throws Exception { if (handlingSettings.isCompression()) { childChannel.pipeline() - .addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + .addLast( + "encoder_compress", + new HttpContentCompressor(defaultCompressionOptions(handlingSettings.getCompressionLevel())) + ); } childChannel.pipeline() @@ -563,4 +577,59 @@ protected ChannelInboundHandlerAdapter createHeaderVerifier() { protected ChannelInboundHandlerAdapter createDecompressor() { return new HttpContentDecompressor(); } + + /** + * Copy of {@link HttpContentCompressor} default compression options with ZSTD excluded: + * although zstd-jni is on the classpath, {@link ZstdEncoder} requires direct buffers support + * which by default {@link NettyAllocator} does not provide. + * + * @param compressionLevel + * {@code 1} yields the fastest compression and {@code 9} yields the + * best compression. {@code 0} means no compression. The default + * compression level is {@code 6}. + * + * @return default compression options + */ + private static CompressionOptions[] defaultCompressionOptions(int compressionLevel) { + return defaultCompressionOptions(compressionLevel, 15, 8); + } + + /** + * Copy of {@link HttpContentCompressor} default compression options with ZSTD excluded: + * although zstd-jni is on the classpath, {@link ZstdEncoder} requires direct buffers support + * which by default {@link NettyAllocator} does not provide. + * + * @param compressionLevel + * {@code 1} yields the fastest compression and {@code 9} yields the + * best compression. {@code 0} means no compression. The default + * compression level is {@code 6}. + * @param windowBits + * The base two logarithm of the size of the history buffer. The + * value should be in the range {@code 9} to {@code 15} inclusive. + * Larger values result in better compression at the expense of + * memory usage. The default value is {@code 15}. + * @param memLevel + * How much memory should be allocated for the internal compression + * state. {@code 1} uses minimum memory and {@code 9} uses maximum + * memory. Larger values result in better and faster compression + * at the expense of memory usage. 
The default value is {@code 8}
+     *
+     * @return default compression options
+     */
+    private static CompressionOptions[] defaultCompressionOptions(int compressionLevel, int windowBits, int memLevel) {
+        final List<CompressionOptions> options = new ArrayList<>(4);
+        final GzipOptions gzipOptions = StandardCompressionOptions.gzip(compressionLevel, windowBits, memLevel);
+        final DeflateOptions deflateOptions = StandardCompressionOptions.deflate(compressionLevel, windowBits, memLevel);
+
+        options.add(gzipOptions);
+        options.add(deflateOptions);
+        options.add(StandardCompressionOptions.snappy());
+
+        if (Brotli.isAvailable()) {
+            options.add(StandardCompressionOptions.brotli());
+        }
+
+        return options.toArray(new CompressionOptions[0]);
+    }
+
 }
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java
index d892918decfb5..05cd7c9fd90d3 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java
@@ -393,7 +393,10 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
         try (Netty4HttpClient client = Netty4HttpClient.http()) {
             DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
-            request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip"));
+            // ZSTD is not supported at the moment by NettyAllocator (needs direct buffers),
+            // and Brotli is not on the classpath.
+            final String contentEncoding = randomFrom("deflate", "gzip", "snappy", "br", "zstd");
+            request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, contentEncoding);
             long numOfHugeAllocations = getHugeAllocationCount();
             final FullHttpResponse response = client.send(remoteAddress.address(), request);
             try {
From 664f254b67b9bd31eab389fcf9b81c1b761d49f7 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 21 Feb 2025 08:01:06 -0500
Subject: [PATCH 019/550] Bump net.minidev:json-smart from 2.5.1 to 2.5.2 in /plugins/repository-hdfs (#17376)

* Bump net.minidev:json-smart in /plugins/repository-hdfs

Bumps [net.minidev:json-smart](https://github.com/netplex/json-smart-v2) from 2.5.1 to 2.5.2.
- [Release notes](https://github.com/netplex/json-smart-v2/releases)
- [Commits](https://github.com/netplex/json-smart-v2/compare/2.5.1...2.5.2)

---
updated-dependencies:
- dependency-name: net.minidev:json-smart
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]

* Updating SHAs

Signed-off-by: dependabot[bot]

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 plugins/repository-hdfs/build.gradle | 2 +-
 plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 | 1 -
 plugins/repository-hdfs/licenses/json-smart-2.5.2.jar.sha1 | 1 +
 3 files changed, 2 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1
 create mode 100644 plugins/repository-hdfs/licenses/json-smart-2.5.2.jar.sha1

diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index cf76c88c5482e..4cfb572929f54 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -81,7 +81,7 @@ dependencies {
   api 'javax.servlet:servlet-api:2.5'
   api "org.slf4j:slf4j-api:${versions.slf4j}"
   api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}"
-  api 'net.minidev:json-smart:2.5.1'
+  api 'net.minidev:json-smart:2.5.2'
   api "io.netty:netty-all:${versions.netty}"
   implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}"
   implementation 'org.codehaus.woodstox:stax2-api:4.2.2'
diff --git a/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1
deleted file mode 100644
index fe23968afce1e..0000000000000
--- a/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4c11d2808d009132dfbbf947ebf37de6bf266c8e
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/json-smart-2.5.2.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.5.2.jar.sha1
new file mode 100644
index 0000000000000..97fc7b94f0fd2
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/json-smart-2.5.2.jar.sha1
@@ -0,0 +1 @@
+95d166b18f95907be0f46cdb9e1c0695eed03387
\ No newline at end of file
From 84477373f70247774f1bd52ad708c393da7f7d7c Mon Sep 17 00:00:00 2001
From: kkewwei
Date: Sat, 22 Feb 2025 03:29:50 +0800
Subject: [PATCH 020/550] Fix missing bucket in terms aggregation with missing value (#17418)

Signed-off-by: kkewwei
Signed-off-by: kkewwei
---
 CHANGELOG-3.0.md | 1 +
 .../aggregations/support/MissingValues.java | 2 +-
 .../bucket/terms/TermsAggregatorTests.java | 94 +++++++++++++++++++
 3 files changed, 96 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md
index fc2fcd361f497..58e5e5cca3acb 100644
--- a/CHANGELOG-3.0.md
+++ b/CHANGELOG-3.0.md
@@ -70,6 +70,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993))
 - Fix swapped field formats in nodes API where `total_indexing_buffer_in_bytes` and `total_indexing_buffer` values were reversed ([#17070](https://github.com/opensearch-project/OpenSearch/pull/17070))
 - Add HTTP/2 protocol support to HttpRequest.HttpVersion ([#17248](https://github.com/opensearch-project/OpenSearch/pull/17248))
+- Fix missing bucket in terms aggregation with missing value ([#17418](https://github.com/opensearch-project/OpenSearch/pull/17418))

 ### Security

diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java
index a5c685a0930e2..429a543281c76 100644
---
a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java @@ -359,7 +359,7 @@ public long getValueCount() { @Override public int docValueCount() { - return values.docValueCount(); + return Math.max(1, values.docValueCount()); } @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index bc22d5f6ef2e8..e59b28d0a51ff 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.NumericDocValuesField; @@ -42,6 +43,8 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.NoMergePolicy; @@ -75,6 +78,8 @@ import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.mapper.RangeType; import org.opensearch.index.mapper.SeqNoFieldMapper; +import org.opensearch.index.mapper.TextFieldMapper; +import org.opensearch.index.mapper.TextParams; import org.opensearch.index.mapper.Uid; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -1578,6 +1583,95 @@ public void testOrderByPipelineAggregation() throws Exception { } } + public void testBucketInTermsAggregationWithMissingValue() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + // test text + { + FieldType type = TextParams.buildFieldType(() -> true, () -> false, () -> "positions", () -> false, () -> "no"); + Document document = new Document(); + document.add(new Field("mv_field", "name1", type)); + document.add(new Field("mv_field", "name2", type)); + indexWriter.addDocument(document); + document = new Document(); + document.add(new Field("mv_field1", "value1", type)); + indexWriter.addDocument(document); + document = new Document(); + document.add(new Field("mv_field1", "value2", type)); + indexWriter.addDocument(document); + indexWriter.flush(); + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { + IndexSearcher indexSearcher = newIndexSearcher(indexReader); + TextFieldMapper.TextFieldType fieldType = new TextFieldMapper.TextFieldType("mv_field"); + fieldType.setFielddata(true); + + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("nick").userValueTypeHint(ValueType.STRING) + .field("mv_field") + .missing("no_nickname"); + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); + + aggregator.preCollection(); + indexSearcher.search(new 
MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + Terms result = reduce(aggregator); + assertEquals(3, result.getBuckets().size()); + assertEquals("no_nickname", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("name1", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("name2", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + + } + indexWriter.deleteAll(); + } + + // test keyword + { + FieldType fieldtype = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE); + fieldtype.setDocValuesType(DocValuesType.SORTED_SET); + fieldtype.setIndexOptions(IndexOptions.NONE); + fieldtype.setStored(true); + + Document document = new Document(); + document.add(new SortedSetDocValuesField("mv_field1", new BytesRef("name1"))); + document.add(new SortedSetDocValuesField("mv_field1", new BytesRef("name2"))); + indexWriter.addDocument(document); + document = new Document(); + document.add(new SortedSetDocValuesField("mv_field2", new BytesRef("value1"))); + indexWriter.addDocument(document); + document = new Document(); + document.add(new SortedSetDocValuesField("mv_field2", new BytesRef("value2"))); + indexWriter.addDocument(document); + indexWriter.flush(); + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { + IndexSearcher indexSearcher = newIndexSearcher(indexReader); + KeywordFieldMapper.KeywordFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("mv_field1"); + + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint( + ValueType.STRING + ).field("mv_field1").missing("no_nickname1"); + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); + + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + Terms result = reduce(aggregator); + assertEquals(3, result.getBuckets().size()); + assertEquals("no_nickname1", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("name1", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("name2", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + } + } + } + } + } + private final SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); private List generateDocsWithNested(String id, int value, int[] nestedValues) { From 4bd1323782ad53b9bbb999ffa9616365701a0865 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Sun, 23 Feb 2025 12:26:29 -0500 Subject: [PATCH 021/550] Update Andriy Redko (https://github.com/reta) affiliation (#17430) Signed-off-by: Andriy Redko --- MAINTAINERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 68d8543ee2725..8a6890d1ca1c1 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -8,7 +8,7 @@ This document contains a list of maintainers in this repo. 
See [opensearch-proje |--------------------------|---------------------------------------------------------|-------------| | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | | Andrew Ross | [andrross](https://github.com/andrross) | Amazon | -| Andriy Redko | [reta](https://github.com/reta) | Aiven | +| Andriy Redko | [reta](https://github.com/reta) | Independent | | Ankit Jain | [jainankitk](https://github.com/jainankitk) | Amazon | | Ashish Singh | [ashking94](https://github.com/ashking94) | Amazon | | Bukhtawar Khan | [Bukhtawar](https://github.com/Bukhtawar) | Amazon | From e7ac072875b8ca39519eb6cefb110ff88fec6d6b Mon Sep 17 00:00:00 2001 From: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Date: Mon, 24 Feb 2025 00:29:39 +0530 Subject: [PATCH 022/550] Add systemd configurations to strengthen OS core security (#17107) * Add systemd configurations to strengthen OS core security Signed-off-by: Rajat Gupta * Add systemd template unit file Signed-off-by: Rajat Gupta * Update CHANGELOG-3.0.md Signed-off-by: Rajat Gupta * Revert "Add systemd configurations to strengthen OS core security" This reverts commit 71b2584ecbdce4bd3aa9328d8d562d5a7028e5c8. Signed-off-by: Rajat Gupta * Remove SocketBind Directives and template unit file Signed-off-by: Rajat Gupta * Minor fixes Signed-off-by: Rajat Gupta * Modify systemd unit file in core to be in sync with distribution unit file Signed-off-by: Rajat Gupta * Modify systemd env file to be in sync with opensearch-build Signed-off-by: Rajat Gupta --------- Signed-off-by: Rajat Gupta Signed-off-by: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Co-authored-by: Rajat Gupta --- CHANGELOG-3.0.md | 1 + .../packages/src/common/env/opensearch | 15 +-- .../src/common/systemd/opensearch.service | 111 +++++++++++++++++- 3 files changed, 114 insertions(+), 13 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 58e5e5cca3acb..9bb8d528a6efb 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [WLM] Add WLM support for search scroll API ([#16981](https://github.com/opensearch-project/OpenSearch/pull/16981)) - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) - Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) +- Add systemd configurations to strengthen OS core security ([#17107](https://github.com/opensearch-project/OpenSearch/pull/17107)) - Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) - Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) diff --git a/distribution/packages/src/common/env/opensearch b/distribution/packages/src/common/env/opensearch index 198bcfde90c4c..a8b6829766924 100644 --- a/distribution/packages/src/common/env/opensearch +++ b/distribution/packages/src/common/env/opensearch @@ -3,17 +3,17 @@ ################################ # OpenSearch home directory -#OPENSEARCH_HOME=/usr/share/opensearch +OPENSEARCH_HOME=/usr/share/opensearch # OpenSearch Java path -#OPENSEARCH_JAVA_HOME= +#OPENSEARCH_JAVA_HOME=/usr/lib/jvm/java-11-amazon-corretto # OpenSearch configuration directory # Note: this setting will be shared with command-line tools -OPENSEARCH_PATH_CONF=${path.conf} +OPENSEARCH_PATH_CONF=/etc/opensearch # OpenSearch PID directory -#PID_DIR=/var/run/opensearch +PID_DIR=/var/run/opensearch # Additional Java OPTS #OPENSEARCH_JAVA_OPTS= @@ -25,11 +25,12 @@ OPENSEARCH_PATH_CONF=${path.conf} # OpenSearch service ################################ -# SysV init.d -# # The number of seconds to wait before checking if OpenSearch started successfully as a daemon process OPENSEARCH_STARTUP_SLEEP_TIME=5 +# Notification for systemd +OPENSEARCH_SD_NOTIFY=true + ################################ # System properties ################################ @@ -49,4 +50,4 @@ OPENSEARCH_STARTUP_SLEEP_TIME=5 # Maximum number of VMA (Virtual Memory Areas) a process can own # When using Systemd, this setting is ignored and the 'vm.max_map_count' # property is set at boot time in /usr/lib/sysctl.d/opensearch.conf -#MAX_MAP_COUNT=262144 +#MAX_MAP_COUNT=262144 \ No newline at end of file diff --git a/distribution/packages/src/common/systemd/opensearch.service b/distribution/packages/src/common/systemd/opensearch.service index 962dc5d2aae72..74870a35cd097 100644 --- a/distribution/packages/src/common/systemd/opensearch.service +++ b/distribution/packages/src/common/systemd/opensearch.service @@ -1,6 +1,16 @@ +# Copyright OpenSearch Contributors +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. 
+ +# Description: +# Default opensearch.service file + [Unit] Description=OpenSearch -Documentation=https://www.elastic.co +Documentation=https://opensearch.org/ Wants=network-online.target After=network-online.target @@ -8,11 +18,8 @@ After=network-online.target Type=notify RuntimeDirectory=opensearch PrivateTmp=true -Environment=OPENSEARCH_HOME=/usr/share/opensearch -Environment=OPENSEARCH_PATH_CONF=${path.conf} -Environment=PID_DIR=/var/run/opensearch -Environment=OPENSEARCH_SD_NOTIFY=true -EnvironmentFile=-${path.env} +EnvironmentFile=-/etc/default/opensearch +EnvironmentFile=-/etc/sysconfig/opensearch WorkingDirectory=/usr/share/opensearch @@ -29,6 +36,7 @@ ExecStart=/usr/share/opensearch/bin/systemd-entrypoint -p ${PID_DIR}/opensearch. # logging, you can simply remove the "quiet" option from ExecStart. StandardOutput=journal StandardError=inherit +SyslogIdentifier=opensearch # Specifies the maximum file descriptor number that can be opened by this process LimitNOFILE=65535 @@ -60,6 +68,97 @@ SuccessExitStatus=143 # Allow a slow startup before the systemd notifier module kicks in to extend the timeout TimeoutStartSec=75 +# Prevent modifications to the control group filesystem +ProtectControlGroups=true + +# Prevent loading or reading kernel modules +ProtectKernelModules=true + +# Prevent altering kernel tunables (sysctl parameters) +ProtectKernelTunables=true + +# Set device access policy to 'closed', allowing access only to specific devices +DevicePolicy=closed + +# Make /proc invisible to the service, enhancing isolation +ProtectProc=invisible + +# Make /usr, /boot, and /etc read-only (less restrictive than 'strict') +ProtectSystem=full + +# Prevent changes to control groups (redundant with earlier setting, can be removed) +ProtectControlGroups=yes + +# Prevent changing the execution domain +LockPersonality=yes + + +# System call filtering +# System call filtering restricts which system calls a process can make +# @ means allowed +# ~ means not allowed +SystemCallFilter=@system-service +SystemCallFilter=~@reboot +SystemCallFilter=~@swap + +SystemCallErrorNumber=EPERM + +# Capability restrictions +# Remove the ability to block system suspends +CapabilityBoundingSet=~CAP_BLOCK_SUSPEND + +# Remove the ability to establish leases on files +CapabilityBoundingSet=~CAP_LEASE + +# Remove the ability to use system resource accounting +CapabilityBoundingSet=~CAP_SYS_PACCT + +# Remove the ability to configure TTY devices +CapabilityBoundingSet=~CAP_SYS_TTY_CONFIG + +# Remove the below capabilities: +# - CAP_SYS_ADMIN: Various system administration operations +# - CAP_SYS_PTRACE: Ability to trace processes +# - CAP_NET_ADMIN: Various network-related operations +CapabilityBoundingSet=~CAP_SYS_ADMIN ~CAP_SYS_PTRACE ~CAP_NET_ADMIN + + +# Address family restrictions +RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX + +# Filesystem Access + +ReadWritePaths=/var/log/opensearch +ReadWritePaths=/var/lib/opensearch +ReadWritePaths=-/etc/opensearch +ReadWritePaths=-/mnt/snapshots + +## Allow read access to system files +ReadOnlyPaths=/etc/os-release /usr/lib/os-release /etc/system-release + +## Allow read access to Linux IO stats +ReadOnlyPaths=/proc/self/mountinfo /proc/diskstats + +## Allow read access to control group stats +ReadOnlyPaths=/proc/self/cgroup /sys/fs/cgroup/cpu /sys/fs/cgroup/cpu/- +ReadOnlyPaths=/sys/fs/cgroup/cpuacct /sys/fs/cgroup/cpuacct/- /sys/fs/cgroup/memory /sys/fs/cgroup/memory/- + + +RestrictNamespaces=true + +NoNewPrivileges=true + +# Memory and execution protection
+MemoryDenyWriteExecute=true # Prevent creating writable executable memory mappings +SystemCallArchitectures=native # Allow only native system calls +KeyringMode=private # Service does not share key material with other services +LockPersonality=true # Prevent changing ABI personality +RestrictSUIDSGID=true # Prevent creating SUID/SGID files +RestrictRealtime=true # Prevent acquiring realtime scheduling +ProtectHostname=true # Prevent changes to system hostname +ProtectKernelLogs=true # Prevent reading/writing kernel logs +ProtectClock=true # Prevent tampering with the system clock + [Install] WantedBy=multi-user.target From 5666982ac659794f73eda6aa07b0044a3bf3c3eb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Feb 2025 07:45:56 -0500 Subject: [PATCH 023/550] Bump com.google.api.grpc:proto-google-common-protos from 2.37.1 to 2.52.0 in /plugins/repository-gcs (#17379) * Bump com.google.api.grpc:proto-google-common-protos Bumps [com.google.api.grpc:proto-google-common-protos](https://github.com/googleapis/sdk-platform-java) from 2.37.1 to 2.52.0. - [Release notes](https://github.com/googleapis/sdk-platform-java/releases) - [Changelog](https://github.com/googleapis/sdk-platform-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/sdk-platform-java/compare/api-common/v2.37.1...v2.52.0) --- updated-dependencies: - dependency-name: com.google.api.grpc:proto-google-common-protos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: gaobinlong Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: gaobinlong --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 2 +- .../licenses/proto-google-common-protos-2.37.1.jar.sha1 | 1 - .../licenses/proto-google-common-protos-2.52.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ad53194361fa..b4d0432b7d3db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) - Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) - Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) - Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) ### Changed diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index d4c870e1ca2b2..53439c1ca7744 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -56,7 +56,7 @@ dependencies { api 'com.google.api-client:google-api-client:2.7.0' - api 'com.google.api.grpc:proto-google-common-protos:2.37.1' + api 
'com.google.api.grpc:proto-google-common-protos:2.52.0' api 'com.google.api.grpc:proto-google-iam-v1:1.33.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 deleted file mode 100644 index 92f991778ccc3..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3b8759ef0468cced72f8f0d4fc3cc57aeb8139f8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 new file mode 100644 index 0000000000000..d955f83389a2d --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 @@ -0,0 +1 @@ +8f64c0540ed74ca464a4a025b32f967bd764bdbe \ No newline at end of file From 4648c3ff6b6bb0021f1231e22bd8babf39b50607 Mon Sep 17 00:00:00 2001 From: Kaushal Kumar Date: Mon, 24 Feb 2025 10:07:40 -0800 Subject: [PATCH 024/550] [Rule based autotagging] Add attribute value store (#17342) * [rule based autotagging] add attribute value store Signed-off-by: Kaushal Kumar * add CHANGELOG entry Signed-off-by: Kaushal Kumar * make the store interface generic Signed-off-by: Kaushal Kumar * run spotless apply Signed-off-by: Kaushal Kumar * add missing javadoc Signed-off-by: Kaushal Kumar * improve javadoc for attribute value store Signed-off-by: Kaushal Kumar * improve binary search bisecting expression Signed-off-by: Kaushal Kumar * add licenses directory Signed-off-by: Kaushal Kumar --------- Signed-off-by: Kaushal Kumar --- CHANGELOG.md | 1 + plugins/workload-management/build.gradle | 5 + .../licenses/commons-collections-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/commons-collections-NOTICE.txt | 9 + .../commons-collections4-4.4.jar.sha1 | 1 + .../plugin/wlm/rule/package-info.java | 13 ++ .../wlm/rule/storage/AttributeValueStore.java | 47 ++++ .../storage/DefaultAttributeValueStore.java | 101 +++++++++ .../plugin/wlm/rule/storage/package-info.java | 12 ++ .../storage/AttributeValueStoreTests.java | 53 +++++ 10 files changed, 444 insertions(+) create mode 100644 plugins/workload-management/licenses/commons-collections-LICENSE.txt create mode 100644 plugins/workload-management/licenses/commons-collections-NOTICE.txt create mode 100644 plugins/workload-management/licenses/commons-collections4-4.4.jar.sha1 create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index b4d0432b7d3db..428cb6a8073d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) - Added 
ability to retrieve value from DocValues in a flat_object filed([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) - Improve performace of NumericTermAggregation by avoiding unnecessary sorting([#17252](https://github.com/opensearch-project/OpenSearch/pull/17252)) +- [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) ### Dependencies - Bump `org.awaitility:awaitility` from 4.2.0 to 4.2.2 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230)) diff --git a/plugins/workload-management/build.gradle b/plugins/workload-management/build.gradle index 2e8b0df468092..c73c63e84ed1f 100644 --- a/plugins/workload-management/build.gradle +++ b/plugins/workload-management/build.gradle @@ -19,4 +19,9 @@ opensearchplugin { } dependencies { + api 'org.apache.commons:commons-collections4:4.4' +} + +tasks.named("dependencyLicenses").configure { + mapping from: /commons-collections.*/, to: 'commons-collections' } diff --git a/plugins/workload-management/licenses/commons-collections-LICENSE.txt b/plugins/workload-management/licenses/commons-collections-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/workload-management/licenses/commons-collections-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/workload-management/licenses/commons-collections-NOTICE.txt b/plugins/workload-management/licenses/commons-collections-NOTICE.txt new file mode 100644 index 0000000000000..79e9484bd56a0 --- /dev/null +++ b/plugins/workload-management/licenses/commons-collections-NOTICE.txt @@ -0,0 +1,9 @@ +Apache Commons Collections +Copyright 2001-2025 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (https://www.apache.org/). + +The Java source file src/main/java/org/apache/commons/collections4/map/ConcurrentReferenceHashMap.java +is from https://github.com/hazelcast/hazelcast and the following notice applies: +Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved. 
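The commons-collections4 dependency licensed above is pulled in for its PatriciaTrie, which backs the DefaultAttributeValueStore added later in this patch. A minimal sketch of the prefix lookup the store builds on follows; the class name, keys, and labels below are illustrative placeholders, not code from the change:

    // Illustrative only, not part of the patch: the PatriciaTrie prefix lookup
    // that DefaultAttributeValueStore relies on.
    import org.apache.commons.collections4.trie.PatriciaTrie;

    public class PatriciaTrieSketch {
        public static void main(String[] args) {
            PatriciaTrie<String> trie = new PatriciaTrie<>();
            trie.put("logs-", "rule-label-a");      // stored attribute-value prefix -> rule label
            trie.put("logs-app1", "rule-label-b");
            trie.put("metrics-", "rule-label-c");
            // prefixMap returns a view of every entry whose key starts with "logs";
            // the store scans such a view to find a stored key that prefixes the input value.
            System.out.println(trie.prefixMap("logs")); // {logs-=rule-label-a, logs-app1=rule-label-b}
        }
    }

A plain HashMap would only support exact-match lookups; the trie is what makes "does any stored prefix match this attribute value" cheap, which is the case the rule store needs to answer.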
diff --git a/plugins/workload-management/licenses/commons-collections4-4.4.jar.sha1 b/plugins/workload-management/licenses/commons-collections4-4.4.jar.sha1 new file mode 100644 index 0000000000000..6b4ed5ab62b44 --- /dev/null +++ b/plugins/workload-management/licenses/commons-collections4-4.4.jar.sha1 @@ -0,0 +1 @@ +62ebe7544cb7164d87e0637a2a6a2bdc981395e8 \ No newline at end of file diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java new file mode 100644 index 0000000000000..85c0562dae5ee --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.rule; +/** + * This package holds constructs for the Rule's in-memory storage, processing and syncing the in-memory view + * with the index view + */ diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java new file mode 100644 index 0000000000000..eb2ce8e4764ea --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.rule.storage; + +import java.util.Optional; + +/** + * This interface provides apis to store Rule attribute values + */ +public interface AttributeValueStore { + /** + * Adds the value to attribute value store + * @param key to be added + * @param value to be added + */ + void put(K key, V value); + + /** + * removes the key and associated value from attribute value store + * @param key to be removed + */ + void remove(K key); + + /** + * Returns the value associated with the key + * @param key in the data structure + * @return + */ + Optional get(K key); + + /** + * Clears all the keys and their associated values from the attribute value store + */ + void clear(); + + /** + * It returns the number of values stored + * @return count of key,val pairs in the store + */ + int size(); +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java new file mode 100644 index 0000000000000..8b4c063f7ad1a --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.rule.storage; + +import org.apache.commons.collections4.trie.PatriciaTrie; + +import java.util.Map; +import java.util.Optional; + +/** + * This is a patricia trie based implementation of AttributeValueStore + * We are choosing patricia trie because it provides very fast search operations on prefix matches as well as range + * lookups. It provides a very efficient storage for strings + * ref: https://commons.apache.org/proper/commons-collections/javadocs/api-4.4/org/apache/commons/collections4/trie/PatriciaTrie.html + */ +public class DefaultAttributeValueStore implements AttributeValueStore { + PatriciaTrie trie; + + /** + * Default constructor + */ + public DefaultAttributeValueStore() { + this(new PatriciaTrie<>()); + } + + /** + * Main constructor + * @param trie A Patricia Trie + */ + public DefaultAttributeValueStore(PatriciaTrie trie) { + this.trie = trie; + } + + @Override + public void put(K key, V value) { + trie.put(key, value); + } + + @Override + public void remove(String key) { + trie.remove(key); + } + + @Override + public Optional get(String key) { + /** + * Since we are inserting prefixes into the trie and searching for larger strings + * It is important to find the largest matching prefix key in the trie efficiently + * Hence we can do binary search + */ + final String longestMatchingPrefix = findLongestMatchingPrefix(key); + + /** + * Now there are following cases for this prefix + * 1. There is a Rule which has this prefix as one of the attribute values. In this case we should return the + * Rule's label otherwise send empty + */ + for (Map.Entry possibleMatch : trie.prefixMap(longestMatchingPrefix).entrySet()) { + if (key.startsWith(possibleMatch.getKey())) { + return Optional.of(possibleMatch.getValue()); + } + } + + return Optional.empty(); + } + + private String findLongestMatchingPrefix(String key) { + int low = 0; + int high = key.length() - 1; + + while (low < high) { + int mid = (high + low + 1) / 2; + /** + * This operation has O(1) complexity because prefixMap returns only the iterator + */ + if (!trie.prefixMap(key.substring(0, mid)).isEmpty()) { + low = mid; + } else { + high = mid - 1; + } + } + + return key.substring(0, low); + } + + @Override + public void clear() { + trie.clear(); + } + + @Override + public int size() { + return trie.size(); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java new file mode 100644 index 0000000000000..6aa721ce22a00 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * This package contains interfaces and implementations for in memory rule storage mechanisms + */ +package org.opensearch.plugin.wlm.rule.storage; diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java new file mode 100644 index 0000000000000..29c42e51efeb0 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.rule.storage; + +import org.apache.commons.collections4.trie.PatriciaTrie; +import org.opensearch.test.OpenSearchTestCase; + +public class AttributeValueStoreTests extends OpenSearchTestCase { + + AttributeValueStore subjectUnderTest; + + public void setUp() throws Exception { + super.setUp(); + subjectUnderTest = new DefaultAttributeValueStore<>(new PatriciaTrie<>()); + } + + public void testPut() { + subjectUnderTest.put("foo", "bar"); + assertEquals("bar", subjectUnderTest.get("foo").get()); + } + + public void testRemove() { + subjectUnderTest.put("foo", "bar"); + subjectUnderTest.remove("foo"); + assertEquals(0, subjectUnderTest.size()); + } + + public void tesGet() { + subjectUnderTest.put("foo", "bar"); + assertEquals("bar", subjectUnderTest.get("foo").get()); + } + + public void testGetWhenNoProperPrefixIsPresent() { + subjectUnderTest.put("foo", "bar"); + subjectUnderTest.put("foodip", "sing"); + assertTrue(subjectUnderTest.get("foxtail").isEmpty()); + subjectUnderTest.put("fox", "lucy"); + + assertFalse(subjectUnderTest.get("foxtail").isEmpty()); + } + + public void testClear() { + subjectUnderTest.put("foo", "bar"); + subjectUnderTest.clear(); + assertEquals(0, subjectUnderTest.size()); + } +} From 0714a1b753d03cce1f684e6f68e93a430b2e1261 Mon Sep 17 00:00:00 2001 From: Xu Xiong Date: Mon, 24 Feb 2025 14:36:41 -0800 Subject: [PATCH 025/550] [Pull-based Ingestion] Offset management, support rewind by offset or timestamp (#17354) * initial commit Signed-off-by: xuxiong1 * add tests Signed-off-by: xuxiong1 * resolve comments Signed-off-by: xuxiong1 * support optional auto offset config Signed-off-by: xuxiong1 * Update DefaultStreamPollerTests with countDownLatch Signed-off-by: xuxiong1 * use long as timestamp type Signed-off-by: xuxiong1 * add change log Signed-off-by: xuxiong1 --------- Signed-off-by: xuxiong1 --- CHANGELOG-3.0.md | 1 + .../plugin/kafka/IngestFromKafkaIT.java | 69 +++++++++++++++- .../plugin/kafka/KafkaPartitionConsumer.java | 46 +++++++++++ .../plugin/kafka/KafkaSourceConfig.java | 13 +++ .../kafka/KafkaPartitionConsumerTests.java | 16 ++++ .../cluster/metadata/IndexMetadata.java | 56 +++++++++++-- .../cluster/metadata/IngestionSource.java | 47 ++++++++++- .../common/settings/IndexScopedSettings.java | 1 + .../index/IngestionShardConsumer.java | 16 ++++ .../index/engine/IngestionEngine.java | 8 +- .../pollingingest/DefaultStreamPoller.java | 23 +++++- .../indices/pollingingest/StreamPoller.java | 4 + .../metadata/IngestionSourceTests.java | 28 ++++--- .../index/engine/FakeIngestionSource.java | 10 +++ .../DefaultStreamPollerTests.java | 82 ++++++++++++++++--- 15 files changed, 380 insertions(+), 40 deletions(-) diff --git 
a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 9bb8d528a6efb..e0ac2c3ecd80d 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -19,6 +19,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) - Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) +- Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java index e7d8e36acb302..d6b099c6b24d8 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java @@ -102,6 +102,69 @@ public void testKafkaIngestion() { } } + public void testKafkaIngestion_RewindByTimeStamp() { + try { + setupKafka(); + // create an index with ingestion source from kafka + createIndex( + "test_rewind_by_timestamp", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "rewind_by_timestamp") + // 1739459500000 is the timestamp of the first message + // 1739459800000 is the timestamp of the second message + // by resetting to 1739459600000, only the second message will be ingested + .put("ingestion_source.pointer.init.reset.value", "1739459600000") + .put("ingestion_source.param.topic", "test") + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("ingestion_source.param.auto.offset.reset", "latest") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); + + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test_rewind_by_timestamp"); + SearchResponse response = client().prepareSearch("test_rewind_by_timestamp").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); + } finally { + stopKafka(); + } + } + + public void testKafkaIngestion_RewindByOffset() { + try { + setupKafka(); + // create an index with ingestion source from kafka + createIndex( + "test_rewind_by_offset", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "rewind_by_offset") + .put("ingestion_source.pointer.init.reset.value", "1") + .put("ingestion_source.param.topic", "test") + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("ingestion_source.param.auto.offset.reset", "latest") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); 
+ + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test_rewind_by_offset"); + SearchResponse response = client().prepareSearch("test_rewind_by_offset").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); + } finally { + stopKafka(); + } + } + private void setupKafka() { kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.2.1")) // disable topic auto creation @@ -122,10 +185,14 @@ private void prepareKafkaData() { Properties props = new Properties(); props.put("bootstrap.servers", kafka.getBootstrapServers()); Producer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); - producer.send(new ProducerRecord<>(topicName, "null", "{\"_id\":\"1\",\"_source\":{\"name\":\"bob\", \"age\": 24}}")); + producer.send( + new ProducerRecord<>(topicName, null, 1739459500000L, "null", "{\"_id\":\"1\",\"_source\":{\"name\":\"bob\", \"age\": 24}}") + ); producer.send( new ProducerRecord<>( topicName, + null, + 1739459800000L, "null", "{\"_id\":\"2\", \"_op_type:\":\"index\",\"_source\":{\"name\":\"alice\", \"age\": 20}}" ) diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java index a20e52a06eecd..9461cfbc2de98 100644 --- a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java +++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java @@ -9,9 +9,12 @@ package org.opensearch.plugin.kafka; import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.consumer.OffsetAndTimestamp; +import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.ByteArrayDeserializer; @@ -27,6 +30,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Properties; import java.util.concurrent.TimeoutException; @@ -47,6 +51,7 @@ public class KafkaPartitionConsumer implements IngestionShardConsumer consumer) { this.clientId = clientId; this.consumer = consumer; + this.config = config; String topic = config.getTopic(); List partitionInfos = AccessController.doPrivileged( (PrivilegedAction>) () -> consumer.partitionsFor(topic, Duration.ofMillis(timeoutMillis)) @@ -93,6 +99,9 @@ protected static Consumer createConsumer(String clientId, KafkaS Properties consumerProp = new Properties(); consumerProp.put("bootstrap.servers", config.getBootstrapServers()); consumerProp.put("client.id", clientId); + if (config.getAutoOffsetResetConfig() != null && !config.getAutoOffsetResetConfig().isEmpty()) { + consumerProp.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, config.getAutoOffsetResetConfig()); + } // TODO: why Class org.apache.kafka.common.serialization.StringDeserializer could not be found if set the deserializer as prop? 
// consumerProp.put("key.deserializer", // "org.apache.kafka.common.serialization.StringDeserializer"); @@ -140,6 +149,43 @@ public IngestionShardPointer latestPointer() { return new KafkaOffset(endOffset); } + @Override + public IngestionShardPointer pointerFromTimestampMillis(long timestampMillis) { + long offset = AccessController.doPrivileged((PrivilegedAction) () -> { + Map position = consumer.offsetsForTimes( + Collections.singletonMap(topicPartition, timestampMillis) + ); + if (position == null || position.isEmpty()) { + return -1L; + } + OffsetAndTimestamp offsetAndTimestamp = position.values().iterator().next(); + if (offsetAndTimestamp == null) { + return -1L; + } + return offsetAndTimestamp.offset(); + }); + if (offset < 0) { + logger.warn("No message found for timestamp {}, fall back to auto.offset.reset policy", timestampMillis); + String autoOffsetResetConfig = config.getAutoOffsetResetConfig(); + if (OffsetResetStrategy.EARLIEST.toString().equals(autoOffsetResetConfig)) { + logger.warn("The auto.offset.reset is set to earliest, seek to earliest pointer"); + return earliestPointer(); + } else if (OffsetResetStrategy.LATEST.toString().equals(autoOffsetResetConfig)) { + logger.warn("The auto.offset.reset is set to latest, seek to latest pointer"); + return latestPointer(); + } else { + throw new IllegalArgumentException("No message found for timestamp " + timestampMillis); + } + } + return new KafkaOffset(offset); + } + + @Override + public IngestionShardPointer pointerFromOffset(String offset) { + long offsetValue = Long.parseLong(offset); + return new KafkaOffset(offsetValue); + } + private synchronized List> fetch(long startOffset, long maxMessages, int timeoutMillis) { if (lastFetchedOffset < 0 || lastFetchedOffset != startOffset - 1) { logger.info("Seeking to offset {}", startOffset); diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java index 722883d353ebf..cbb8530963ec8 100644 --- a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java +++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java @@ -18,9 +18,12 @@ public class KafkaSourceConfig { private final String PROP_TOPIC = "topic"; private final String PROP_BOOTSTRAP_SERVERS = "bootstrap_servers"; + // TODO: support pass any generic kafka configs + private final String PROP_AUTO_OFFSET_RESET = "auto.offset.reset"; private final String topic; private final String bootstrapServers; + private final String autoOffsetResetConfig; /** * Constructor @@ -29,6 +32,7 @@ public class KafkaSourceConfig { public KafkaSourceConfig(Map params) { this.topic = ConfigurationUtils.readStringProperty(params, PROP_TOPIC); this.bootstrapServers = ConfigurationUtils.readStringProperty(params, PROP_BOOTSTRAP_SERVERS); + this.autoOffsetResetConfig = ConfigurationUtils.readOptionalStringProperty(params, PROP_AUTO_OFFSET_RESET); } /** @@ -47,4 +51,13 @@ public String getTopic() { public String getBootstrapServers() { return bootstrapServers; } + + /** + * Get the auto offset reset configuration + * + * @return the auto offset reset configuration + */ + public String getAutoOffsetResetConfig() { + return autoOffsetResetConfig; + } } diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java 
b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java index 96f639366d887..d1d9ad4fbf8ae 100644 --- a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java +++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java @@ -90,6 +90,22 @@ public void testLatestPointer() { assertEquals(10L, offset.getOffset()); } + public void testPointerFromTimestampMillis() { + TopicPartition topicPartition = new TopicPartition("test-topic", 0); + when(mockConsumer.offsetsForTimes(Collections.singletonMap(topicPartition, 1000L))).thenReturn( + Collections.singletonMap(topicPartition, new org.apache.kafka.clients.consumer.OffsetAndTimestamp(5L, 1000L)) + ); + + KafkaOffset offset = (KafkaOffset) consumer.pointerFromTimestampMillis(1000); + + assertEquals(5L, offset.getOffset()); + } + + public void testPointerFromOffset() { + KafkaOffset offset = new KafkaOffset(5L); + assertEquals(5L, offset.getOffset()); + } + public void testTopicDoesNotExist() { Map params = new HashMap<>(); params.put("topic", "non-existent-topic"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index d50192f106cfe..d4fcadc4ac56d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -716,8 +716,7 @@ public void validate(final String value, final Map, Object> settings) @Override public void validate(final String value) { - if (!(value.equalsIgnoreCase(StreamPoller.ResetState.LATEST.name()) - || value.equalsIgnoreCase(StreamPoller.ResetState.EARLIEST.name()))) { + if (!isValidResetState(value)) { throw new IllegalArgumentException( "Invalid value for " + SETTING_INGESTION_SOURCE_POINTER_INIT_RESET + " [" + value + "]" ); @@ -725,10 +724,50 @@ public void validate(final String value) { } @Override - public void validate(final String value, final Map, Object> settings) {} + public void validate(final String value, final Map, Object> settings) { + if (isRewindState(value)) { + // Ensure the reset value setting is provided when rewinding. + final String resetValue = (String) settings.get(INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING); + if (resetValue == null || resetValue.isEmpty()) { + throw new IllegalArgumentException( + "Setting " + + INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING.getKey() + + " should be set when REWIND_BY_OFFSET or REWIND_BY_TIMESTAMP" + ); + } + } + } + + private boolean isValidResetState(String value) { + return StreamPoller.ResetState.LATEST.name().equalsIgnoreCase(value) + || StreamPoller.ResetState.EARLIEST.name().equalsIgnoreCase(value) + || isRewindState(value); + } + + private boolean isRewindState(String value) { + return StreamPoller.ResetState.REWIND_BY_OFFSET.name().equalsIgnoreCase(value) + || StreamPoller.ResetState.REWIND_BY_TIMESTAMP.name().equalsIgnoreCase(value); + } + + @Override + public Iterator> settings() { + final List> settings = Collections.singletonList(INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING); + return settings.iterator(); + } }, Property.IndexScope, - Property.Dynamic + Property.Final + ); + + /** + * Defines the setting for the value to be used when resetting by offset or timestamp. 
+ */ + public static final String SETTING_INGESTION_SOURCE_POINTER_INIT_RESET_VALUE = "index.ingestion_source.pointer.init.reset.value"; + public static final Setting INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING = Setting.simpleString( + SETTING_INGESTION_SOURCE_POINTER_INIT_RESET_VALUE, + "", + Property.IndexScope, + Property.Final ); public static final Setting.AffixSetting INGESTION_SOURCE_PARAMS_SETTING = Setting.prefixKeySetting( @@ -954,7 +993,14 @@ public Version getCreationVersion() { public IngestionSource getIngestionSource() { final String ingestionSourceType = INGESTION_SOURCE_TYPE_SETTING.get(settings); if (ingestionSourceType != null && !(NONE_INGESTION_SOURCE_TYPE.equals(ingestionSourceType))) { - final String pointerInitReset = INGESTION_SOURCE_POINTER_INIT_RESET_SETTING.get(settings); + final StreamPoller.ResetState pointerInitResetType = StreamPoller.ResetState.valueOf( + INGESTION_SOURCE_POINTER_INIT_RESET_SETTING.get(settings).toUpperCase(Locale.ROOT) + ); + final String pointerInitResetValue = INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING.get(settings); + IngestionSource.PointerInitReset pointerInitReset = new IngestionSource.PointerInitReset( + pointerInitResetType, + pointerInitResetValue + ); final Map ingestionSourceParams = INGESTION_SOURCE_PARAMS_SETTING.getAsMap(settings); return new IngestionSource(ingestionSourceType, pointerInitReset, ingestionSourceParams); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java index 583114d9ecbd2..9849c0a5f2ba9 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java @@ -9,6 +9,7 @@ package org.opensearch.cluster.metadata; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.indices.pollingingest.StreamPoller; import java.util.Map; import java.util.Objects; @@ -19,10 +20,10 @@ @ExperimentalApi public class IngestionSource { private String type; - private String pointerInitReset; + private PointerInitReset pointerInitReset; private Map params; - public IngestionSource(String type, String pointerInitReset, Map params) { + public IngestionSource(String type, PointerInitReset pointerInitReset, Map params) { this.type = type; this.pointerInitReset = pointerInitReset; this.params = params; @@ -32,7 +33,7 @@ public String getType() { return type; } - public String getPointerInitReset() { + public PointerInitReset getPointerInitReset() { return pointerInitReset; } @@ -59,4 +60,44 @@ public int hashCode() { public String toString() { return "IngestionSource{" + "type='" + type + '\'' + ",pointer_init_reset='" + pointerInitReset + '\'' + ", params=" + params + '}'; } + + /** + * Class encapsulating the configuration of a pointer initialization. 
+ */ + @ExperimentalApi + public static class PointerInitReset { + private final StreamPoller.ResetState type; + private final String value; + + public PointerInitReset(StreamPoller.ResetState type, String value) { + this.type = type; + this.value = value; + } + + public StreamPoller.ResetState getType() { + return type; + } + + public String getValue() { + return value; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PointerInitReset pointerInitReset = (PointerInitReset) o; + return Objects.equals(type, pointerInitReset.type) && Objects.equals(value, pointerInitReset.value); + } + + @Override + public int hashCode() { + return Objects.hash(type, value); + } + + @Override + public String toString() { + return "PointerInitReset{" + "type='" + type + '\'' + ", value=" + value + '}'; + } + } } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 0e21104fb6426..946d7fe734deb 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -263,6 +263,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for ingestion source IndexMetadata.INGESTION_SOURCE_TYPE_SETTING, IndexMetadata.INGESTION_SOURCE_POINTER_INIT_RESET_SETTING, + IndexMetadata.INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING, IndexMetadata.INGESTION_SOURCE_PARAMS_SETTING, // validate that built-in similarities don't get redefined diff --git a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java index 02a9f5a18ebb1..41e659196a612 100644 --- a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java +++ b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java @@ -82,6 +82,22 @@ public M getMessage() { */ IngestionShardPointer latestPointer(); + /** + * Returns an ingestion shard pointer based on the provided timestamp in milliseconds. + * + * @param timestampMillis the timestamp in milliseconds + * @return the ingestion shard pointer corresponding to the given timestamp + */ + IngestionShardPointer pointerFromTimestampMillis(long timestampMillis); + + /** + * Returns an ingestion shard pointer based on the provided offset. 
+ * + * @param offset the offset value + * @return the ingestion shard pointer corresponding to the given offset + */ + IngestionShardPointer pointerFromOffset(String offset); + /** * @return the shard id */ diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index 58c6371d51c0a..b37281b9d1582 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -65,7 +65,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -172,9 +171,7 @@ public void start() { logger.info("created ingestion consumer for shard [{}]", engineConfig.getShardId()); Map commitData = commitDataAsMap(); - StreamPoller.ResetState resetState = StreamPoller.ResetState.valueOf( - ingestionSource.getPointerInitReset().toUpperCase(Locale.ROOT) - ); + StreamPoller.ResetState resetState = ingestionSource.getPointerInitReset().getType(); IngestionShardPointer startPointer = null; Set persistedPointers = new HashSet<>(); if (commitData.containsKey(StreamPoller.BATCH_START)) { @@ -191,7 +188,8 @@ public void start() { resetState = StreamPoller.ResetState.NONE; } - streamPoller = new DefaultStreamPoller(startPointer, persistedPointers, ingestionShardConsumer, this, resetState); + String resetValue = ingestionSource.getPointerInitReset().getValue(); + streamPoller = new DefaultStreamPoller(startPointer, persistedPointers, ingestionShardConsumer, this, resetState, resetValue); streamPoller.start(); } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java index b5c1db999544a..884cffec4aad5 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -52,6 +52,7 @@ public class DefaultStreamPoller implements StreamPoller { private IngestionShardPointer batchStartPointer; private ResetState resetState; + private final String resetValue; private Set persistedPointers; @@ -68,14 +69,16 @@ public DefaultStreamPoller( Set persistedPointers, IngestionShardConsumer consumer, IngestionEngine ingestionEngine, - ResetState resetState + ResetState resetState, + String resetValue ) { this( startPointer, persistedPointers, consumer, new MessageProcessorRunnable(new ArrayBlockingQueue<>(100), ingestionEngine), - resetState + resetState, + resetValue ); } @@ -84,10 +87,12 @@ public DefaultStreamPoller( Set persistedPointers, IngestionShardConsumer consumer, MessageProcessorRunnable processorRunnable, - ResetState resetState + ResetState resetState, + String resetValue ) { this.consumer = Objects.requireNonNull(consumer); this.resetState = resetState; + this.resetValue = resetValue; batchStartPointer = startPointer; this.persistedPointers = persistedPointers; if (!this.persistedPointers.isEmpty()) { @@ -151,6 +156,18 @@ protected void startPoll() { batchStartPointer = consumer.latestPointer(); logger.info("Resetting offset by seeking to latest offset {}", batchStartPointer.asString()); break; + case REWIND_BY_OFFSET: + batchStartPointer = consumer.pointerFromOffset(resetValue); + logger.info("Resetting offset by seeking to offset {}", batchStartPointer.asString()); + break; + 
case REWIND_BY_TIMESTAMP: + batchStartPointer = consumer.pointerFromTimestampMillis(Long.parseLong(resetValue)); + logger.info( + "Resetting offset by seeking to timestamp {}, corresponding offset {}", + resetValue, + batchStartPointer.asString() + ); + break; } resetState = ResetState.NONE; } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java index f674f6dc55c85..5010982991ecc 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java @@ -8,6 +8,7 @@ package org.opensearch.indices.pollingingest; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.index.IngestionShardPointer; import java.io.Closeable; @@ -63,9 +64,12 @@ enum State { /** * a reset state to indicate how to reset the pointer */ + @ExperimentalApi enum ResetState { EARLIEST, LATEST, + REWIND_BY_OFFSET, + REWIND_BY_TIMESTAMP, NONE, } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java index f67d13e54e608..0afe67002517b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.metadata; +import org.opensearch.indices.pollingingest.StreamPoller; import org.opensearch.test.OpenSearchTestCase; import java.util.HashMap; @@ -15,53 +16,60 @@ public class IngestionSourceTests extends OpenSearchTestCase { + private final IngestionSource.PointerInitReset pointerInitReset = new IngestionSource.PointerInitReset( + StreamPoller.ResetState.REWIND_BY_OFFSET, + "1000" + ); + public void testConstructorAndGetters() { Map params = new HashMap<>(); params.put("key", "value"); - IngestionSource source = new IngestionSource("type", "pointerInitReset", params); + IngestionSource source = new IngestionSource("type", pointerInitReset, params); assertEquals("type", source.getType()); - assertEquals("pointerInitReset", source.getPointerInitReset()); + assertEquals(StreamPoller.ResetState.REWIND_BY_OFFSET, source.getPointerInitReset().getType()); + assertEquals("1000", source.getPointerInitReset().getValue()); assertEquals(params, source.params()); } public void testEquals() { Map params1 = new HashMap<>(); params1.put("key", "value"); - IngestionSource source1 = new IngestionSource("type", "pointerInitReset", params1); + IngestionSource source1 = new IngestionSource("type", pointerInitReset, params1); Map params2 = new HashMap<>(); params2.put("key", "value"); - IngestionSource source2 = new IngestionSource("type", "pointerInitReset", params2); + IngestionSource source2 = new IngestionSource("type", pointerInitReset, params2); assertTrue(source1.equals(source2)); assertTrue(source2.equals(source1)); - IngestionSource source3 = new IngestionSource("differentType", "pointerInitReset", params1); + IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, params1); assertFalse(source1.equals(source3)); } public void testHashCode() { Map params1 = new HashMap<>(); params1.put("key", "value"); - IngestionSource source1 = new IngestionSource("type", "pointerInitReset", params1); + IngestionSource source1 = new IngestionSource("type", pointerInitReset, params1); Map params2 = new HashMap<>(); 
params2.put("key", "value"); - IngestionSource source2 = new IngestionSource("type", "pointerInitReset", params2); + IngestionSource source2 = new IngestionSource("type", pointerInitReset, params2); assertEquals(source1.hashCode(), source2.hashCode()); - IngestionSource source3 = new IngestionSource("differentType", "pointerInitReset", params1); + IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, params1); assertNotEquals(source1.hashCode(), source3.hashCode()); } public void testToString() { Map params = new HashMap<>(); params.put("key", "value"); - IngestionSource source = new IngestionSource("type", "pointerInitReset", params); + IngestionSource source = new IngestionSource("type", pointerInitReset, params); - String expected = "IngestionSource{type='type',pointer_init_reset='pointerInitReset', params={key=value}}"; + String expected = + "IngestionSource{type='type',pointer_init_reset='PointerInitReset{type='REWIND_BY_OFFSET', value=1000}', params={key=value}}"; assertEquals(expected, source.toString()); } } diff --git a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java index de03dcd313c29..1d81a22e94e9c 100644 --- a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java +++ b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java @@ -93,6 +93,16 @@ public FakeIngestionShardPointer latestPointer() { return new FakeIngestionShardPointer(messages.size()); } + @Override + public IngestionShardPointer pointerFromTimestampMillis(long timestampMillis) { + throw new UnsupportedOperationException("Not implemented yet."); + } + + @Override + public IngestionShardPointer pointerFromOffset(String offset) { + return new FakeIngestionShardPointer(Long.parseLong(offset)); + } + @Override public int getShardId() { return shardId; diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java index 1a98f65d04f7c..c17b11791af09 100644 --- a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java +++ b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java @@ -20,8 +20,11 @@ import java.util.List; import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -34,7 +37,7 @@ public class DefaultStreamPollerTests extends OpenSearchTestCase { private MessageProcessorRunnable.MessageProcessor processor; private List messages; private Set persistedPointers; - private final int sleepTime = 300; + private final int awaitTime = 300; @Before public void setUp() throws Exception { @@ -52,7 +55,8 @@ public void setUp() throws Exception { persistedPointers, fakeConsumer, processorRunnable, - StreamPoller.ResetState.NONE + StreamPoller.ResetState.NONE, + "" ); } @@ -65,16 +69,32 @@ public void tearDown() throws Exception { } public void testPauseAndResume() throws InterruptedException { + // We'll use a latch that counts the number of messages processed. 
+ CountDownLatch pauseLatch = new CountDownLatch(2); + doAnswer(invocation -> { + pauseLatch.countDown(); + return null; + }).when(processor).process(any(), any()); + poller.pause(); poller.start(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + + // Wait briefly to ensure that no processing occurs. + boolean processedWhilePaused = pauseLatch.await(awaitTime, TimeUnit.MILLISECONDS); + // Expecting the latch NOT to reach zero because we are paused. + assertFalse("Messages should not be processed while paused", processedWhilePaused); assertEquals(DefaultStreamPoller.State.PAUSED, poller.getState()); assertTrue(poller.isPaused()); - // no messages are processed verify(processor, never()).process(any(), any()); + CountDownLatch resumeLatch = new CountDownLatch(2); + doAnswer(invocation -> { + resumeLatch.countDown(); + return null; + }).when(processor).process(any(), any()); + poller.resume(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + resumeLatch.await(); assertFalse(poller.isPaused()); // 2 messages are processed verify(processor, times(2)).process(any(), any()); @@ -90,10 +110,18 @@ public void testSkipProcessed() throws InterruptedException { persistedPointers, fakeConsumer, processorRunnable, - StreamPoller.ResetState.NONE + StreamPoller.ResetState.NONE, + "" ); + + CountDownLatch latch = new CountDownLatch(2); + doAnswer(invocation -> { + latch.countDown(); + return null; + }).when(processor).process(any(), any()); + poller.start(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + latch.await(); // 2 messages are processed, 2 messages are skipped verify(processor, times(2)).process(any(), any()); assertEquals(new FakeIngestionSource.FakeIngestionShardPointer(2), poller.getMaxPersistedPointer()); @@ -106,7 +134,7 @@ public void testCloseWithoutStart() { public void testClose() throws InterruptedException { poller.start(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + waitUntil(() -> poller.getState() == DefaultStreamPoller.State.POLLING, awaitTime, TimeUnit.MILLISECONDS); poller.close(); assertTrue(poller.isClosed()); assertEquals(DefaultStreamPoller.State.CLOSED, poller.getState()); @@ -118,11 +146,17 @@ public void testResetStateEarliest() throws InterruptedException { persistedPointers, fakeConsumer, processorRunnable, - StreamPoller.ResetState.EARLIEST + StreamPoller.ResetState.EARLIEST, + "" ); + CountDownLatch latch = new CountDownLatch(2); + doAnswer(invocation -> { + latch.countDown(); + return null; + }).when(processor).process(any(), any()); poller.start(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + latch.await(); // 2 messages are processed verify(processor, times(2)).process(any(), any()); @@ -134,17 +168,39 @@ public void testResetStateLatest() throws InterruptedException { persistedPointers, fakeConsumer, processorRunnable, - StreamPoller.ResetState.LATEST + StreamPoller.ResetState.LATEST, + "" ); poller.start(); - Thread.sleep(sleepTime); // Allow some time for the poller to run + waitUntil(() -> poller.getState() == DefaultStreamPoller.State.POLLING, awaitTime, TimeUnit.MILLISECONDS); // no messages processed verify(processor, never()).process(any(), any()); // reset to the latest assertEquals(new FakeIngestionSource.FakeIngestionShardPointer(2), poller.getBatchStartPointer()); } + public void testResetStateRewindByOffset() throws InterruptedException { + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(2), + 
persistedPointers, + fakeConsumer, + processorRunnable, + StreamPoller.ResetState.REWIND_BY_OFFSET, + "1" + ); + CountDownLatch latch = new CountDownLatch(1); + doAnswer(invocation -> { + latch.countDown(); + return null; + }).when(processor).process(any(), any()); + + poller.start(); + latch.await(); + // 1 message is processed + verify(processor, times(1)).process(any(), any()); + } + public void testStartPollWithoutStart() { try { poller.startPoll(); @@ -156,7 +212,7 @@ public void testStartPollWithoutStart() { public void testStartClosedPoller() throws InterruptedException { poller.start(); - Thread.sleep(sleepTime); + waitUntil(() -> poller.getState() == DefaultStreamPoller.State.POLLING, awaitTime, TimeUnit.MILLISECONDS); poller.close(); try { poller.startPoll(); From bc209ee6bacbb1027dcd7ba28d56b6ceb96f4fe0 Mon Sep 17 00:00:00 2001 From: Divyansh Pandey <98746046+pandeydivyansh1803@users.noreply.github.com> Date: Tue, 25 Feb 2025 08:39:14 +0530 Subject: [PATCH 026/550] Add new index and cluster level settings to limit the total primary shards per node and per index (#17295) * Added a new index level setting to limit the total primary shards per index per node. Added relevant files for unit test and integration test. Signed-off-by: Divyansh Pandey * update files for code quality Signed-off-by: Divyansh Pandey * moved primary shard count function to RoutingNode.java Signed-off-by: Divyansh Pandey * removed unwanted files Signed-off-by: Divyansh Pandey * added cluster level setting to limit total primary shards per node Signed-off-by: Divyansh Pandey * allow the index level settings to be applied to both DOCUMENT and SEGMENT replication indices Signed-off-by: Divyansh Pandey * Added necessary validator to restrict the index and cluster level primary shards per node settings only for remote store enabled cluster. Added relevant unit and integration tests. 
Signed-off-by: Divyansh Pandey * refactoring changes Signed-off-by: Divyansh Pandey * refactoring changes Signed-off-by: Divyansh Pandey * Empty commit to rerun gradle test Signed-off-by: Divyansh Pandey * optimised the calculation of total primary shards on a node Signed-off-by: Divyansh Pandey * Refactoring changes Signed-off-by: Divyansh Pandey * refactoring changes, added TODO to MetadataCreateIndexService Signed-off-by: Divyansh Pandey * Added integration test for scenario where primary shards setting is set for cluster which is not remote store enabled Signed-off-by: Divyansh Pandey --------- Signed-off-by: Divyansh Pandey Signed-off-by: Divyansh Pandey <98746046+pandeydivyansh1803@users.noreply.github.com> Co-authored-by: Divyansh Pandey --- CHANGELOG-3.0.md | 1 + .../ShardsLimitAllocationDeciderIT.java | 305 +++++++++++++++ ...AllocationDeciderRemoteStoreEnabledIT.java | 248 +++++++++++++ .../TransportClusterUpdateSettingsAction.java | 32 ++ .../cluster/metadata/IndexMetadata.java | 11 + .../metadata/MetadataCreateIndexService.java | 24 ++ .../MetadataIndexTemplateService.java | 4 + .../MetadataUpdateSettingsService.java | 2 + .../cluster/routing/RoutingNode.java | 113 +++++- .../decider/ShardsLimitAllocationDecider.java | 91 ++++- .../common/settings/ClusterSettings.java | 1 + .../common/settings/IndexScopedSettings.java | 1 + .../MetadataCreateIndexServiceTests.java | 91 +++++ .../MetadataIndexTemplateServiceTests.java | 19 + .../cluster/routing/RoutingNodeTests.java | 55 +++ .../ShardsLimitAllocationDeciderTests.java | 349 ++++++++++++++++++ 16 files changed, 1320 insertions(+), 27 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderIT.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java create mode 100644 server/src/test/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderTests.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index e0ac2c3ecd80d..e4ae38e8da2ae 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add systemd configurations to strengthen OS core security ([#17107](https://github.com/opensearch-project/OpenSearch/pull/17107)) - Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) - Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) +- Add cluster and index level settings to limit the total primary shards per node and per index [#17295](https://github.com/opensearch-project/OpenSearch/pull/17295) - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) - Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) - Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderIT.java new file mode 100644 index 0000000000000..fdc6a7e6b96b2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderIT.java @@ -0,0 +1,305 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation.decider; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 3) +public class ShardsLimitAllocationDeciderIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); + } + + public void testClusterWideShardsLimit() { + // Set the cluster-wide shard limit to 2 + updateClusterSetting(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 4); + + // Create the first two indices with 3 shards and 1 replica each + createIndex("test1", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + createIndex("test2", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + + // Create the third index with 2 shards and 1 replica + createIndex("test3", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + + // Wait for the shard limit to be applied + try { + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards + assertEquals(16, state.getRoutingTable().allShards().size()); + + // Check number of unassigned shards + int unassignedShards = state.getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(); + assertEquals(4, unassignedShards); + + // Check shard distribution across nodes + for (RoutingNode routingNode : state.getRoutingNodes()) { + assertTrue("Node exceeds shard limit", routingNode.numberOfOwningShards() 
<= 4); + } + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + + // Additional assertions to verify shard distribution + ClusterState state = client().admin().cluster().prepareState().get().getState(); + int totalAssignedShards = 0; + for (RoutingNode routingNode : state.getRoutingNodes()) { + totalAssignedShards += routingNode.numberOfOwningShards(); + } + assertEquals("Total assigned shards should be 12", 12, totalAssignedShards); + + } + + public void testIndexSpecificShardLimit() { + // Set the index-specific shard limit to 2 for the first index only + Settings indexSettingsWithLimit = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 4) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 2) + .build(); + + Settings indexSettingsWithoutLimit = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 4).put(SETTING_NUMBER_OF_REPLICAS, 1).build(); + + // Create the first index with 4 shards, 1 replica, and the index-specific limit + createIndex("test1", indexSettingsWithLimit); + + // Create the second index with 4 shards and 1 replica, without the index-specific limit + createIndex("test2", indexSettingsWithoutLimit); + + // Create the third index with 3 shards and 1 replica, without the index-specific limit + createIndex("test3", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + + try { + // Wait for the shard limit to be applied + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards + assertEquals(22, state.getRoutingTable().allShards().size()); + + // Check total number of assigned and unassigned shards + int totalAssignedShards = 0; + int totalUnassignedShards = 0; + Map unassignedShardsByIndex = new HashMap<>(); + + for (IndexRoutingTable indexRoutingTable : state.getRoutingTable()) { + String index = indexRoutingTable.getIndex().getName(); + int indexUnassignedShards = 0; + + for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : shardRoutingTable) { + if (shardRouting.unassigned()) { + totalUnassignedShards++; + indexUnassignedShards++; + } else { + totalAssignedShards++; + } + } + } + + unassignedShardsByIndex.put(index, indexUnassignedShards); + } + + assertEquals("Total assigned shards should be 20", 20, totalAssignedShards); + assertEquals("Total unassigned shards should be 2", 2, totalUnassignedShards); + + // Check unassigned shards for each index + assertEquals("test1 should have 2 unassigned shards", 2, unassignedShardsByIndex.get("test1").intValue()); + assertEquals("test2 should have 0 unassigned shards", 0, unassignedShardsByIndex.get("test2").intValue()); + assertEquals("test3 should have 0 unassigned shards", 0, unassignedShardsByIndex.get("test3").intValue()); + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testCombinedClusterAndIndexSpecificShardLimits() { + // Set the cluster-wide shard limit to 6 + updateClusterSetting(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 6); + + // Create the first index with 3 shards, 1 replica, and index-specific limit of 1 + Settings indexSettingsWithLimit = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 3) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + createIndex("test1", indexSettingsWithLimit); + + // Create the second index with 4 shards and 1 replica + createIndex("test2", 
Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 4).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + + // Create the third index with 3 shards and 1 replica + createIndex("test3", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 1).build()); + + try { + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards + assertEquals("Total shards should be 20", 20, state.getRoutingTable().allShards().size()); + + int totalAssignedShards = 0; + int totalUnassignedShards = 0; + Map unassignedShardsByIndex = new HashMap<>(); + Map nodeShardCounts = new HashMap<>(); + Map> indexShardsPerNode = new HashMap<>(); + + for (RoutingNode routingNode : state.getRoutingNodes()) { + String nodeName = routingNode.node().getName(); + nodeShardCounts.put(nodeName, routingNode.numberOfOwningShards()); + indexShardsPerNode.put(nodeName, new HashSet<>()); + + for (ShardRouting shardRouting : routingNode) { + indexShardsPerNode.get(nodeName).add(shardRouting.getIndexName()); + } + } + + for (IndexRoutingTable indexRoutingTable : state.getRoutingTable()) { + String index = indexRoutingTable.getIndex().getName(); + int indexUnassignedShards = 0; + + for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : shardRoutingTable) { + if (shardRouting.unassigned()) { + totalUnassignedShards++; + indexUnassignedShards++; + } else { + totalAssignedShards++; + } + } + } + + unassignedShardsByIndex.put(index, indexUnassignedShards); + } + + assertEquals("Total assigned shards should be 17", 17, totalAssignedShards); + assertEquals("Total unassigned shards should be 3", 3, totalUnassignedShards); + assertEquals("test1 should have 3 unassigned shards", 3, unassignedShardsByIndex.get("test1").intValue()); + assertEquals("test2 should have 0 unassigned shards", 0, unassignedShardsByIndex.getOrDefault("test2", 0).intValue()); + assertEquals("test3 should have 0 unassigned shards", 0, unassignedShardsByIndex.getOrDefault("test3", 0).intValue()); + + // Check shard distribution across nodes + List shardCounts = new ArrayList<>(nodeShardCounts.values()); + Collections.sort(shardCounts, Collections.reverseOrder()); + assertEquals("Two nodes should have 6 shards", 6, shardCounts.get(0).intValue()); + assertEquals("Two nodes should have 6 shards", 6, shardCounts.get(1).intValue()); + assertEquals("One node should have 5 shards", 5, shardCounts.get(2).intValue()); + + // Check that all nodes have only one shard of the first index + for (Set indexesOnNode : indexShardsPerNode.values()) { + assertTrue("Each node should have a shard from test1", indexesOnNode.contains("test1")); + } + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Integration test to verify the behavior of INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + * in a non-remote store environment. + * + * Scenario: + * An end-user attempts to create an index with INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + * on a cluster where remote store is not enabled. + * + * Expected Outcome: + * The system should reject the index creation request and throw an appropriate exception, + * indicating that this setting is only applicable for remote store enabled clusters. 
+ */ + public void testIndexTotalPrimaryShardsPerNodeSettingWithoutRemoteStore() { + // Attempt to create an index with INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + Settings indexSettings = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 3) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + + // Assert that creating the index throws an exception + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> { createIndex("test_index", indexSettings); } + ); + + // Verify the exception message + assertTrue( + "Exception should mention that the setting requires remote store", + exception.getMessage() + .contains( + "Setting [index.routing.allocation.total_primary_shards_per_node] can only be used with remote store enabled clusters" + ) + ); + } + + /** + * Integration test to verify the behavior of CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + * in a non-remote store environment. + * + * Scenario: + * An end-user attempts to create an index with CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + * on a cluster where remote store is not enabled. + * + * Expected Outcome: + * The system should reject the index creation request and throw an appropriate exception, + * indicating that this setting is only applicable for remote store enabled clusters. + */ + public void testClusterTotalPrimaryShardsPerNodeSettingWithoutRemoteStore() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { + updateClusterSetting(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1); + }); + + // Verify the exception message + assertTrue( + "Exception should mention that the setting requires remote store", + exception.getMessage() + .contains( + "Setting [cluster.routing.allocation.total_primary_shards_per_node] can only be used with remote store enabled clusters" + ) + ); + + // Attempt to create an index with INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + Settings indexSettings = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 3) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + + createIndex("test_index", indexSettings); + } + + private void updateClusterSetting(String setting, int value) { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(setting, value)).get(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java new file mode 100644 index 0000000000000..401db7790de92 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java @@ -0,0 +1,248 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.routing.allocation.decider; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.settings.Settings; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class ShardsLimitAllocationDeciderRemoteStoreEnabledIT extends RemoteStoreBaseIntegTestCase { + @Before + public void setup() { + setupCustomCluster(); + } + + private void setupCustomCluster() { + // Start cluster manager node first + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); + // Start data nodes + List dataNodes = internalCluster().startDataOnlyNodes(3); + // Wait for green cluster state + ensureGreen(); + } + + public void testIndexPrimaryShardLimit() throws Exception { + // Create first index with primary shard limit + Settings firstIndexSettings = Settings.builder() + .put(remoteStoreIndexSettings(0, 4)) // 4 shards, 0 replicas + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + + // Create first index + createIndex("test1", firstIndexSettings); + + // Create second index + createIndex("test2", remoteStoreIndexSettings(0, 4)); + + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards (8 total: 4 from each index) + assertEquals("Total shards should be 8", 8, state.getRoutingTable().allShards().size()); + + // Count assigned and unassigned shards for test1 + int test1AssignedShards = 0; + int test1UnassignedShards = 0; + Map nodePrimaryCount = new HashMap<>(); + + // Check test1 shard distribution + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test1")) { + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode()) { + test1AssignedShards++; + // Count primaries per node for test1 + String nodeId = shard.currentNodeId(); + nodePrimaryCount.merge(nodeId, 1, Integer::sum); + } else { + test1UnassignedShards++; + } + } + } + + // Check test2 shard assignment + int test2UnassignedShards = 0; + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test2")) { + for (ShardRouting shard : shardRouting) { + if (!shard.assignedToNode()) { + test2UnassignedShards++; + } + } + } + + // Assertions + assertEquals("test1 should have 3 assigned shards", 3, test1AssignedShards); + assertEquals("test1 should have 1 unassigned shard", 1, test1UnassignedShards); + assertEquals("test2 should have no unassigned shards", 0, test2UnassignedShards); + + // Verify no node has more than one primary shard of test1 + for (Integer count : nodePrimaryCount.values()) { + assertTrue("No node should have more than 1 primary shard of test1", count <= 1); + } + }); + } + + public void testClusterPrimaryShardLimitss() throws Exception { + // Update cluster setting to limit primary shards per node + 
updateClusterSetting(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1); + + // Create index with 4 shards and 1 replica + createIndex("test1", remoteStoreIndexSettings(1, 4)); + + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards (8 total: 4 primaries + 4 replicas) + assertEquals("Total shards should be 8", 8, state.getRoutingTable().allShards().size()); + + // Count assigned and unassigned shards for test1 + int assignedShards = 0; + int unassignedShards = 0; + int unassignedPrimaries = 0; + int unassignedReplicas = 0; + Map nodePrimaryCount = new HashMap<>(); + + // Check shard distribution + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test1")) { + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode()) { + assignedShards++; + if (shard.primary()) { + // Count primaries per node + String nodeId = shard.currentNodeId(); + nodePrimaryCount.merge(nodeId, 1, Integer::sum); + } + } else { + unassignedShards++; + if (shard.primary()) { + unassignedPrimaries++; + } else { + unassignedReplicas++; + } + } + } + } + + // Assertions + assertEquals("Should have 6 assigned shards", 6, assignedShards); + assertEquals("Should have 2 unassigned shards", 2, unassignedShards); + assertEquals("Should have 1 unassigned primary", 1, unassignedPrimaries); + assertEquals("Should have 1 unassigned replica", 1, unassignedReplicas); + + // Verify no node has more than one primary shard + for (Integer count : nodePrimaryCount.values()) { + assertTrue("No node should have more than 1 primary shard", count <= 1); + } + }); + } + + public void testCombinedIndexAndClusterPrimaryShardLimits() throws Exception { + // Set cluster-wide primary shard limit to 3 + updateClusterSetting(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 3); + + // Create first index with index-level primary shard limit + Settings firstIndexSettings = Settings.builder() + .put(remoteStoreIndexSettings(1, 4)) // 4 shards, 1 replica + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + + // Create first index + createIndex("test1", firstIndexSettings); + + // Create second index with no index-level limits + createIndex("test2", remoteStoreIndexSettings(1, 4)); // 4 shards, 1 replica + + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards (16 total: 8 from each index - 4 primaries + 4 replicas each) + assertEquals("Total shards should be 16", 16, state.getRoutingTable().allShards().size()); + + // Count assigned and unassigned shards for both indices + int totalAssignedShards = 0; + int test1UnassignedPrimaries = 0; + int test1UnassignedReplicas = 0; + int test2UnassignedShards = 0; + Map nodePrimaryCount = new HashMap<>(); + + // Check test1 shard distribution + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test1")) { + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode()) { + totalAssignedShards++; + if (shard.primary()) { + String nodeId = shard.currentNodeId(); + nodePrimaryCount.merge(nodeId, 1, Integer::sum); + } + } else { + if (shard.primary()) { + test1UnassignedPrimaries++; + } else { + test1UnassignedReplicas++; + } + } + } + } + + // Check test2 shard distribution + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test2")) { + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode()) { + 
totalAssignedShards++; + if (shard.primary()) { + String nodeId = shard.currentNodeId(); + nodePrimaryCount.merge(nodeId, 1, Integer::sum); + } + } else { + test2UnassignedShards++; + } + } + } + + // Assertions + assertEquals("Should have 14 assigned shards", 14, totalAssignedShards); + assertEquals("Should have 1 unassigned primary in test1", 1, test1UnassignedPrimaries); + assertEquals("Should have 1 unassigned replica in test1", 1, test1UnassignedReplicas); + assertEquals("Should have no unassigned shards in test2", 0, test2UnassignedShards); + + // Verify no node has more than one primary shard for test1 + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test1")) { + Map test1NodePrimaryCount = new HashMap<>(); + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode() && shard.primary()) { + test1NodePrimaryCount.merge(shard.currentNodeId(), 1, Integer::sum); + } + } + for (Integer count : test1NodePrimaryCount.values()) { + assertTrue("No node should have more than 1 primary shard of test1", count <= 1); + } + } + + // Verify no node has more than three primary shards total (cluster-wide limit) + for (Integer count : nodePrimaryCount.values()) { + assertTrue("No node should have more than 3 primary shards total", count <= 3); + } + }); + } + + private void updateClusterSetting(String setting, int value) { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(setting, value)).get(); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 3988d50b2ce1e..60c04d5a620f8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -64,6 +64,7 @@ import java.io.IOException; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.index.remote.RemoteStoreUtils.checkAndFinalizeRemoteStoreMigration; /** @@ -257,6 +258,7 @@ public void onFailure(String source, Exception e) { @Override public ClusterState execute(final ClusterState currentState) { + validateClusterTotalPrimaryShardsPerNodeSetting(currentState, request); boolean isCompatibilityModeChanging = validateCompatibilityModeSettingRequest(request, state); ClusterState clusterState = updater.updateSettings( currentState, @@ -324,4 +326,34 @@ private void validateAllNodesOfSameType(DiscoveryNodes discoveryNodes) { ); } } + + private void validateClusterTotalPrimaryShardsPerNodeSetting(ClusterState currentState, ClusterUpdateSettingsRequest request) { + if (request.transientSettings().hasValue(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey()) + || request.persistentSettings().hasValue(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey())) { + + Settings settings = Settings.builder().put(request.transientSettings()).put(request.persistentSettings()).build(); + + int newValue = CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(settings); + + // If default value (-1), no validation needed + if (newValue == -1) { + return; + } + + // Check current state + boolean allNodesRemoteStoreEnabled = currentState.nodes() + .getNodes() + .values() + .stream() + .allMatch(discoveryNode -> 
discoveryNode.isRemoteStoreNode()); + + if (!allNodesRemoteStoreEnabled) { + throw new IllegalArgumentException( + "Setting [" + + CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey() + + "] can only be used with remote store enabled clusters" + ); + } + } + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index d4fcadc4ac56d..cabea0efe8433 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -856,6 +856,7 @@ public Iterator> settings() { private final boolean isRemoteSnapshot; private final int indexTotalShardsPerNodeLimit; + private final int indexTotalPrimaryShardsPerNodeLimit; private final boolean isAppendOnlyIndex; private final Context context; @@ -888,6 +889,7 @@ private IndexMetadata( final Map rolloverInfos, final boolean isSystem, final int indexTotalShardsPerNodeLimit, + final int indexTotalPrimaryShardsPerNodeLimit, boolean isAppendOnlyIndex, final Context context ) { @@ -926,6 +928,7 @@ private IndexMetadata( this.isSystem = isSystem; this.isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); this.indexTotalShardsPerNodeLimit = indexTotalShardsPerNodeLimit; + this.indexTotalPrimaryShardsPerNodeLimit = indexTotalPrimaryShardsPerNodeLimit; this.isAppendOnlyIndex = isAppendOnlyIndex; this.context = context; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; @@ -1115,6 +1118,10 @@ public int getIndexTotalShardsPerNodeLimit() { return this.indexTotalShardsPerNodeLimit; } + public int getIndexTotalPrimaryShardsPerNodeLimit() { + return this.indexTotalPrimaryShardsPerNodeLimit; + } + public boolean isAppendOnlyIndex() { return this.isAppendOnlyIndex; } @@ -1913,6 +1920,9 @@ public IndexMetadata build() { } final int indexTotalShardsPerNodeLimit = ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); + final int indexTotalPrimaryShardsPerNodeLimit = ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get( + settings + ); final boolean isAppendOnlyIndex = INDEX_APPEND_ONLY_ENABLED_SETTING.get(settings); final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); @@ -1945,6 +1955,7 @@ public IndexMetadata build() { rolloverInfos, isSystem, indexTotalShardsPerNodeLimit, + indexTotalPrimaryShardsPerNodeLimit, isAppendOnlyIndex, context ); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index a2e1ca440512d..a81fe01f0e7f4 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -155,6 +155,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; import static org.opensearch.cluster.metadata.MetadataIndexTemplateService.findContextTemplateName; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static 
org.opensearch.index.IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; @@ -1094,6 +1095,7 @@ static Settings aggregateIndexSettings( if (FeatureFlags.isEnabled(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING)) { validateSearchOnlyReplicasSettings(indexSettings); } + validateIndexTotalPrimaryShardsPerNodeSetting(indexSettings); return indexSettings; } @@ -1844,6 +1846,28 @@ public static void validateRefreshIntervalSettings(Settings requestSettings, Clu } } + /** + * Validates {@code index.routing.allocation.total_primary_shards_per_node} is only set for remote store enabled cluster + */ + // TODO : Update this check for SegRep to DocRep migration on need basis + public static void validateIndexTotalPrimaryShardsPerNodeSetting(Settings indexSettings) { + // Get the setting value + int indexPrimaryShardsPerNode = INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(indexSettings); + + // If default value (-1), no validation needed + if (indexPrimaryShardsPerNode == -1) { + return; + } + + // Check if remote store is enabled + boolean isRemoteStoreEnabled = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings); + if (!isRemoteStoreEnabled) { + throw new IllegalArgumentException( + "Setting [" + INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey() + "] can only be used with remote store enabled clusters" + ); + } + } + /** * Validates {@code index.translog.durability} is not async if the {@code cluster.remote_store.index.restrict.async-durability} is set to true. * diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 5d20388b74e1f..b032ade720612 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -101,6 +101,7 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateIndexTotalPrimaryShardsPerNodeSetting; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex; import static org.opensearch.common.util.concurrent.ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME; @@ -1642,6 +1643,9 @@ private void validate(String name, @Nullable Settings settings, List ind validateRefreshIntervalSettings(settings, clusterService.getClusterSettings()); validateTranslogFlushIntervalSettingsForCompositeIndex(settings, clusterService.getClusterSettings()); validateTranslogDurabilitySettingsInTemplate(settings, clusterService.getClusterSettings()); + + // validate index total primary shards per node setting + validateIndexTotalPrimaryShardsPerNodeSetting(settings); } if (indexPatterns.stream().anyMatch(Regex::isMatchAllPattern)) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index a35af0e607c31..eb10fd5d04288 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ 
b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -78,6 +78,7 @@ import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateIndexTotalPrimaryShardsPerNodeSetting; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateOverlap; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings; @@ -139,6 +140,7 @@ public void updateSettings( validateRefreshIntervalSettings(normalizedSettings, clusterService.getClusterSettings()); validateTranslogDurabilitySettings(normalizedSettings, clusterService.getClusterSettings(), clusterService.getSettings()); + validateIndexTotalPrimaryShardsPerNodeSetting(normalizedSettings); final int defaultReplicaCount = clusterService.getClusterSettings().get(Metadata.DEFAULT_REPLICA_COUNT_SETTING); Settings.Builder settingsForClosedIndices = Settings.builder(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java index 24c3077960444..15f1b99ac2754 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java @@ -124,6 +124,62 @@ public Iterator iterator() { Collections.unmodifiableCollection(this.shardTuple.v2().values()).stream() ).iterator(); } + + public int numberOfPrimaryShards() { + return this.shardTuple.v1().size(); + } + } + + static class RelocatingShardsBucket { + private final LinkedHashSet relocatingShards; + private final LinkedHashSet relocatingPrimaryShards; + + RelocatingShardsBucket() { + relocatingShards = new LinkedHashSet<>(); + relocatingPrimaryShards = new LinkedHashSet<>(); + } + + public boolean add(ShardRouting shard) { + boolean res = relocatingShards.add(shard); + if (shard.primary()) { + relocatingPrimaryShards.add(shard); + } + return res; + } + + public boolean remove(ShardRouting shard) { + boolean res = relocatingShards.remove(shard); + relocatingPrimaryShards.remove(shard); + return res; + } + + public int size() { + return relocatingShards.size(); + } + + public int primarySize() { + return relocatingPrimaryShards.size(); + } + + public Set getRelocatingShards() { + return Collections.unmodifiableSet(relocatingShards); + } + + public Set getRelocatingPrimaryShards() { + return Collections.unmodifiableSet(relocatingPrimaryShards); + } + + public List getRelocatingShardsList() { + return new ArrayList<>(relocatingShards); + } + + // For assertions/verification + public boolean invariant() { + assert relocatingShards.containsAll(relocatingPrimaryShards); + assert relocatingPrimaryShards.stream().allMatch(ShardRouting::primary); + assert relocatingPrimaryShards.size() == relocatingShards.stream().filter(ShardRouting::primary).count(); + return true; + } } private final String nodeId; @@ -132,9 +188,9 @@ public Iterator iterator() { private final BucketedShards shards; - private final LinkedHashSet initializingShards; + private final RelocatingShardsBucket relocatingShardsBucket; - private final LinkedHashSet relocatingShards; 
+ private final LinkedHashSet initializingShards; private final HashMap> shardsByIndex; @@ -144,7 +200,7 @@ public RoutingNode(String nodeId, DiscoveryNode node, ShardRouting... shardRouti final LinkedHashMap primaryShards = new LinkedHashMap<>(); final LinkedHashMap replicaShards = new LinkedHashMap<>(); this.shards = new BucketedShards(primaryShards, replicaShards); - this.relocatingShards = new LinkedHashSet<>(); + this.relocatingShardsBucket = new RelocatingShardsBucket(); this.initializingShards = new LinkedHashSet<>(); this.shardsByIndex = new LinkedHashMap<>(); @@ -152,7 +208,7 @@ public RoutingNode(String nodeId, DiscoveryNode node, ShardRouting... shardRouti if (shardRouting.initializing()) { initializingShards.add(shardRouting); } else if (shardRouting.relocating()) { - relocatingShards.add(shardRouting); + relocatingShardsBucket.add(shardRouting); } shardsByIndex.computeIfAbsent(shardRouting.index(), k -> new LinkedHashSet<>()).add(shardRouting); @@ -231,7 +287,7 @@ void add(ShardRouting shard) { if (shard.initializing()) { initializingShards.add(shard); } else if (shard.relocating()) { - relocatingShards.add(shard); + relocatingShardsBucket.add(shard); } shardsByIndex.computeIfAbsent(shard.index(), k -> new LinkedHashSet<>()).add(shard); assert invariant(); @@ -251,7 +307,7 @@ void update(ShardRouting oldShard, ShardRouting newShard) { boolean exist = initializingShards.remove(oldShard); assert exist : "expected shard " + oldShard + " to exist in initializingShards"; } else if (oldShard.relocating()) { - boolean exist = relocatingShards.remove(oldShard); + boolean exist = relocatingShardsBucket.remove(oldShard); assert exist : "expected shard " + oldShard + " to exist in relocatingShards"; } shardsByIndex.get(oldShard.index()).remove(oldShard); @@ -261,7 +317,7 @@ void update(ShardRouting oldShard, ShardRouting newShard) { if (newShard.initializing()) { initializingShards.add(newShard); } else if (newShard.relocating()) { - relocatingShards.add(newShard); + relocatingShardsBucket.add(newShard); } shardsByIndex.computeIfAbsent(newShard.index(), k -> new LinkedHashSet<>()).add(newShard); assert invariant(); @@ -275,7 +331,7 @@ void remove(ShardRouting shard) { boolean exist = initializingShards.remove(shard); assert exist : "expected shard " + shard + " to exist in initializingShards"; } else if (shard.relocating()) { - boolean exist = relocatingShards.remove(shard); + boolean exist = relocatingShardsBucket.remove(shard); assert exist : "expected shard " + shard + " to exist in relocatingShards"; } shardsByIndex.get(shard.index()).remove(shard); @@ -295,7 +351,7 @@ public int numberOfShardsWithState(ShardRoutingState... states) { if (states[0] == ShardRoutingState.INITIALIZING) { return initializingShards.size(); } else if (states[0] == ShardRoutingState.RELOCATING) { - return relocatingShards.size(); + return relocatingShardsBucket.size(); } } @@ -320,7 +376,7 @@ public List shardsWithState(ShardRoutingState... states) { if (states[0] == ShardRoutingState.INITIALIZING) { return new ArrayList<>(initializingShards); } else if (states[0] == ShardRoutingState.RELOCATING) { - return new ArrayList<>(relocatingShards); + return relocatingShardsBucket.getRelocatingShardsList(); } } @@ -354,7 +410,7 @@ public List shardsWithState(String index, ShardRoutingState... 
sta } return shards; } else if (states[0] == ShardRoutingState.RELOCATING) { - for (ShardRouting shardEntry : relocatingShards) { + for (ShardRouting shardEntry : relocatingShardsBucket.getRelocatingShards()) { if (shardEntry.getIndexName().equals(index) == false) { continue; } @@ -381,7 +437,11 @@ public List shardsWithState(String index, ShardRoutingState... sta * The number of shards on this node that will not be eventually relocated. */ public int numberOfOwningShards() { - return shards.size() - relocatingShards.size(); + return shards.size() - relocatingShardsBucket.size(); + } + + public int numberOfOwningPrimaryShards() { + return shards.numberOfPrimaryShards() - relocatingShardsBucket.primarySize(); } public int numberOfOwningShardsForIndex(final Index index) { @@ -393,6 +453,20 @@ public int numberOfOwningShardsForIndex(final Index index) { } } + public int numberOfOwningPrimaryShardsForIndex(final Index index) { + final LinkedHashSet shardRoutings = shardsByIndex.get(index); + if (shardRoutings == null) { + return 0; + } else { + return Math.toIntExact( + shardRoutings.stream() + .filter(sr -> sr.relocating() == false) + .filter(ShardRouting::primary) // Add this filter for primary shards + .count() + ); + } + } + public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("-----node_id[").append(nodeId).append("][").append(node == null ? "X" : "V").append("]\n"); @@ -441,8 +515,19 @@ private boolean invariant() { Collection shardRoutingsRelocating = StreamSupport.stream(shards.spliterator(), false) .filter(ShardRouting::relocating) .collect(Collectors.toList()); - assert relocatingShards.size() == shardRoutingsRelocating.size(); - assert relocatingShards.containsAll(shardRoutingsRelocating); + assert relocatingShardsBucket.getRelocatingShards().size() == shardRoutingsRelocating.size(); + assert relocatingShardsBucket.getRelocatingShards().containsAll(shardRoutingsRelocating); + + // relocatingPrimaryShards must be consistent with primary shards that are relocating + Collection primaryShardRoutingsRelocating = StreamSupport.stream(shards.spliterator(), false) + .filter(ShardRouting::relocating) + .filter(ShardRouting::primary) + .collect(Collectors.toList()); + assert relocatingShardsBucket.getRelocatingPrimaryShards().size() == primaryShardRoutingsRelocating.size(); + assert relocatingShardsBucket.getRelocatingPrimaryShards().containsAll(primaryShardRoutingsRelocating); + + // relocatingPrimaryShards and relocatingShards should be consistent + assert relocatingShardsBucket.invariant(); final Map> shardRoutingsByIndex = StreamSupport.stream(shards.spliterator(), false) .collect(Collectors.groupingBy(ShardRouting::index, Collectors.toSet())); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index 6f211f370de95..ad77aed4e4fd5 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.routing.allocation.decider; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -46,13 +47,14 @@ /** * This {@link 
AllocationDecider} limits the number of shards per node on a per * index or node-wide basis. The allocator prevents a single node to hold more - * than {@code index.routing.allocation.total_shards_per_node} per index and - * {@code cluster.routing.allocation.total_shards_per_node} globally during the allocation + * than {@code index.routing.allocation.total_shards_per_node} per index, {@code index.routing.allocation.total_primary_shards_per_node} per index, + * {@code cluster.routing.allocation.total_shards_per_node} globally and + * {@code cluster.routing.allocation.total_primary_shards_per_node} globally during the allocation * process. The limits of this decider can be changed in real-time via a the * index settings API. *

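+ * For example, setting {@code cluster.routing.allocation.total_primary_shards_per_node} to 2 keeps any single node from being assigned more than two primary shards, and {@code index.routing.allocation.total_primary_shards_per_node} applies the same kind of per-node cap for a single index.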
- * If {@code index.routing.allocation.total_shards_per_node} is reset to a negative value shards - * per index are unlimited per node. Shards currently in the + * If {@code index.routing.allocation.total_shards_per_node} or {@code index.routing.allocation.total_primary_shards_per_node} is reset to a negative value shards + * per index are unlimited per node or primary shards per index are unlimited per node, respectively. Shards currently in the * {@link ShardRoutingState#RELOCATING relocating} state are ignored by this * {@link AllocationDecider} until the shard changed its state to either * {@link ShardRoutingState#STARTED started}, @@ -70,6 +72,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { public static final String NAME = "shards_limit"; private volatile int clusterShardLimit; + private volatile int clusterPrimaryShardLimit; /** * Controls the maximum number of shards per index on a single OpenSearch @@ -84,7 +87,19 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { ); /** - * Controls the maximum number of shards per node on a global level. + * Controls the maximum number of primary shards per index on a single OpenSearch + * node for segment replication enabled indices. Negative values are interpreted as unlimited. + */ + public static final Setting INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING = Setting.intSetting( + "index.routing.allocation.total_primary_shards_per_node", + -1, + -1, + Property.Dynamic, + Property.IndexScope + ); + + /** + * Controls the maximum number of shards per node on a cluster level. * Negative values are interpreted as unlimited. */ public static final Setting CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting( @@ -95,18 +110,36 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { Property.NodeScope ); + /** + * Controls the maximum number of primary shards per node on a cluster level. + * Negative values are interpreted as unlimited. 
+ */ + public static final Setting CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING = Setting.intSetting( + "cluster.routing.allocation.total_primary_shards_per_node", + -1, + -1, + Property.Dynamic, + Property.NodeScope + ); + private final Settings settings; public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) { this.settings = settings; this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); + this.clusterPrimaryShardLimit = CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING, this::setClusterPrimaryShardLimit); } private void setClusterShardLimit(int clusterShardLimit) { this.clusterShardLimit = clusterShardLimit; } + private void setClusterPrimaryShardLimit(int clusterPrimaryShardLimit) { + this.clusterPrimaryShardLimit = clusterPrimaryShardLimit; + } + @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return doDecide(shardRouting, node, allocation, (count, limit) -> count >= limit); @@ -115,7 +148,6 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing @Override public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return doDecide(shardRouting, node, allocation, (count, limit) -> count > limit); - } private Decision doDecide( @@ -124,18 +156,22 @@ private Decision doDecide( RoutingAllocation allocation, BiPredicate decider ) { - final int indexShardLimit = allocation.metadata().getIndexSafe(shardRouting.index()).getIndexTotalShardsPerNodeLimit(); + IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); + final int indexShardLimit = indexMetadata.getIndexTotalShardsPerNodeLimit(); + final int indexPrimaryShardLimit = indexMetadata.getIndexTotalPrimaryShardsPerNodeLimit(); // Capture the limit here in case it changes during this method's // execution final int clusterShardLimit = this.clusterShardLimit; - - if (indexShardLimit <= 0 && clusterShardLimit <= 0) { + final int clusterPrimaryShardLimit = this.clusterPrimaryShardLimit; + if (indexShardLimit <= 0 && indexPrimaryShardLimit <= 0 && clusterShardLimit <= 0 && clusterPrimaryShardLimit <= 0) { return allocation.decision( Decision.YES, NAME, - "total shard limits are disabled: [index: %d, cluster: %d] <= 0", + "total shard limits are disabled: [index: %d, index primary: %d, cluster: %d, cluster primary: %d] <= 0", indexShardLimit, - clusterShardLimit + indexPrimaryShardLimit, + clusterShardLimit, + clusterPrimaryShardLimit ); } @@ -151,6 +187,19 @@ private Decision doDecide( clusterShardLimit ); } + if (shardRouting.primary() && clusterPrimaryShardLimit > 0) { + final int nodePrimaryShardCount = node.numberOfOwningPrimaryShards(); + if (decider.test(nodePrimaryShardCount, clusterPrimaryShardLimit)) { + return allocation.decision( + Decision.NO, + NAME, + "too many primary shards [%d] allocated to this node, cluster setting [%s=%d]", + nodePrimaryShardCount, + CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), + clusterPrimaryShardLimit + ); + } + } if (indexShardLimit > 0) { final int indexShardCount = node.numberOfOwningShardsForIndex(shardRouting.index()); if (decider.test(indexShardCount, indexShardLimit)) { @@ -165,13 +214,29 @@ private Decision doDecide( ); } } + if (indexPrimaryShardLimit > 
0 && shardRouting.primary()) { + final int indexPrimaryShardCount = node.numberOfOwningPrimaryShardsForIndex(shardRouting.index()); + if (decider.test(indexPrimaryShardCount, indexPrimaryShardLimit)) { + return allocation.decision( + Decision.NO, + NAME, + "too many primary shards [%d] allocated to this node for index [%s], index setting [%s=%d]", + indexPrimaryShardCount, + shardRouting.getIndexName(), + INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), + indexPrimaryShardLimit + ); + } + } return allocation.decision( Decision.YES, NAME, - "the shard count [%d] for this node is under the index limit [%d] and cluster level node limit [%d]", + "the shard count [%d] for this node is under the index limit [%d], index primary limit [%d], cluster level node limit [%d] and cluster level primary node limit [%d]", nodeShardCount, indexShardLimit, - clusterShardLimit + indexPrimaryShardLimit, + clusterShardLimit, + clusterPrimaryShardLimit ); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index d204c383524c2..b4b85e0a9d367 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -433,6 +433,7 @@ public void apply(Settings value, Settings current, Settings previous) { SniffConnectionStrategy.REMOTE_NODE_CONNECTIONS, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, + ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING, NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING, HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 946d7fe734deb..dc77ffd720bad 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -166,6 +166,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.MAX_SLICES_PER_PIT, IndexSettings.MAX_REGEX_LENGTH_SETTING, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, + ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING, IndexSettings.INDEX_GC_DELETES_SETTING, IndexSettings.INDEX_SOFT_DELETES_SETTING, IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index cc35426ee15b8..dfe3928ac37f3 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -155,6 +155,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateIndexService.getIndexNumberOfRoutingShards; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.parseV1Mappings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases; +import static 
org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.index.IndexSettings.INDEX_MERGE_POLICY; @@ -2548,6 +2549,96 @@ public void testApplyContextWithSettingsOverlap() throws IOException { }); } + public void testIndexTotalPrimaryShardsPerNodeSettingValidationWithRemoteStore() { + // Test case where setting is used with remote store enabled (should succeed) + Settings settings = Settings.builder().build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + + final Settings.Builder requestSettings = Settings.builder() + // Enable remote store + .put(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.getKey(), true) + // Set primary shards per node to valid value + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 2) + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT.toString()); + + request.settings(requestSettings.build()); + + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + + // Verify that the value is the same as set earlier and validation was successful + assertEquals(Integer.valueOf(2), INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(indexSettings)); + } + + public void testIndexTotalPrimaryShardsPerNodeSettingValidationWithoutRemoteStore() { + // Test case where setting is used without remote store (should fail) + Settings settings = Settings.builder().build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + + final Settings.Builder requestSettings = Settings.builder() + // Remote store not enabled + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 2) + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT.toString()); + + request.settings(requestSettings.build()); + + // Expect IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + + // Verify error message + assertEquals( + "Setting [index.routing.allocation.total_primary_shards_per_node] can only be used with remote store enabled clusters", + exception.getMessage() + ); + } + + public void testIndexTotalPrimaryShardsPerNodeSettingValidationWithDefaultValue() { + // Test case with default value (-1) without remote store (should succeed) + Settings settings = Settings.builder().build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + + final Settings.Builder requestSettings = Settings.builder().put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), -1); + + request.settings(requestSettings.build()); + + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + + // Verify that default value 
passes validation + assertEquals(Integer.valueOf(-1), INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(indexSettings)); + } + private IndexTemplateMetadata addMatchingTemplate(Consumer configurator) { IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*"); configurator.accept(builder); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 05ae67d10f4cb..795d1713772c2 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -69,6 +69,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexTemplateException; import org.opensearch.indices.SystemIndices; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.ThreadPool; @@ -92,6 +93,7 @@ import static java.util.Collections.singletonList; import static org.opensearch.cluster.applicationtemplates.ClusterStateSystemTemplateLoader.TEMPLATE_LOADER_IDENTIFIER; import static org.opensearch.cluster.applicationtemplates.SystemTemplateMetadata.fromComponentTemplateInfo; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.common.settings.Settings.builder; import static org.opensearch.common.util.concurrent.ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME; import static org.opensearch.env.Environment.PATH_HOME_SETTING; @@ -2440,6 +2442,23 @@ public void testMaxTranslogFlushSizeWithCompositeIndex() { assertThat(throwables.get(0), instanceOf(IllegalArgumentException.class)); } + public void testIndexPrimaryShardsSetting() { + Settings clusterSettings = Settings.builder().build(); + PutRequest request = new PutRequest("test", "test_index_primary_shard_constraint"); + request.patterns(singletonList("test_shards_wait*")); + Settings.Builder settingsBuilder = builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 2) + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT.toString()); + request.settings(settingsBuilder.build()); + List throwables = putTemplate(xContentRegistry(), request, clusterSettings); + assertThat(throwables.get(0), instanceOf(IllegalArgumentException.class)); + assertEquals( + "Setting [index.routing.allocation.total_primary_shards_per_node] can only be used with remote store enabled clusters", + throwables.get(0).getMessage() + ); + } + private static List putTemplate(NamedXContentRegistry xContentRegistry, PutRequest request) { return putTemplate(xContentRegistry, request, Settings.EMPTY); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java index cc4f2e510cb31..c78e5582155d1 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java @@ -165,6 +165,32 @@ public void testNumberOfOwningShards() { assertThat(routingNode.numberOfOwningShards(), equalTo(2)); } 
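+ // Two owning primaries are expected: test1 shard 0 and test3 shard 1 are started primaries on the node, while the relocating primary test3 shard 0 and the replica test2 shard 0 are not counted.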
+ public void testNumberOfOwningPrimaryShards() { + final ShardRouting test1Shard0 = TestShardRouting.newShardRouting("test1", 0, "node-1", true, ShardRoutingState.STARTED); + final ShardRouting test2Shard0 = TestShardRouting.newShardRouting( + "test2", + 0, + "node-1", + "node-2", + false, + ShardRoutingState.RELOCATING + ); + final ShardRouting test3Shard0 = TestShardRouting.newShardRouting( + "test3", + 0, + "node-1", + "node-2", + true, + ShardRoutingState.RELOCATING + ); + final ShardRouting test3Shard1 = TestShardRouting.newShardRouting("test3", 1, "node-1", true, ShardRoutingState.STARTED); + routingNode.add(test1Shard0); + routingNode.add(test2Shard0); + routingNode.add(test3Shard0); + routingNode.add(test3Shard1); + assertThat(routingNode.numberOfOwningPrimaryShards(), equalTo(2)); + } + public void testNumberOfOwningShardsForIndex() { final ShardRouting test1Shard0 = TestShardRouting.newShardRouting("test1", 0, "node-1", false, ShardRoutingState.STARTED); final ShardRouting test2Shard0 = TestShardRouting.newShardRouting( @@ -183,4 +209,33 @@ public void testNumberOfOwningShardsForIndex() { assertThat(routingNode.numberOfOwningShardsForIndex(new Index("test3", IndexMetadata.INDEX_UUID_NA_VALUE)), equalTo(0)); } + public void testNumberOfOwningPrimaryShardsForIndex() { + final ShardRouting test1Shard0 = TestShardRouting.newShardRouting("test1", 0, "node-1", true, ShardRoutingState.STARTED); + final ShardRouting test2Shard0 = TestShardRouting.newShardRouting( + "test2", + 0, + "node-1", + "node-2", + false, + ShardRoutingState.RELOCATING + ); + final ShardRouting test3Shard0 = TestShardRouting.newShardRouting( + "test3", + 0, + "node-1", + "node-2", + true, + ShardRoutingState.RELOCATING + ); + final ShardRouting test3Shard1 = TestShardRouting.newShardRouting("test3", 1, "node-1", true, ShardRoutingState.STARTED); + routingNode.add(test1Shard0); + routingNode.add(test2Shard0); + routingNode.add(test3Shard0); + routingNode.add(test3Shard1); + assertThat(routingNode.numberOfOwningPrimaryShardsForIndex(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE)), equalTo(0)); + assertThat(routingNode.numberOfOwningPrimaryShardsForIndex(new Index("test1", IndexMetadata.INDEX_UUID_NA_VALUE)), equalTo(1)); + assertThat(routingNode.numberOfOwningPrimaryShardsForIndex(new Index("test2", IndexMetadata.INDEX_UUID_NA_VALUE)), equalTo(0)); + assertThat(routingNode.numberOfOwningPrimaryShardsForIndex(new Index("test3", IndexMetadata.INDEX_UUID_NA_VALUE)), equalTo(1)); + } + } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderTests.java new file mode 100644 index 0000000000000..ffc42d11d3696 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderTests.java @@ -0,0 +1,349 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.routing.allocation.decider; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.cluster.routing.allocation.decider.Decision.Type.NO; +import static org.opensearch.cluster.routing.allocation.decider.Decision.Type.YES; + +public class ShardsLimitAllocationDeciderTests extends OpenSearchTestCase { + + public void testWithNoLimit() { + Settings settings = Settings.builder().build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ShardsLimitAllocationDecider decider = new ShardsLimitAllocationDecider(settings, clusterSettings); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(0)) + .build(); + + // Create a RoutingTable with shards 0 and 1 initialized on node1, and shard 2 unassigned + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test").getIndex()); + + // Shard 0 and 1: STARTED on node1 + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED)); + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 1, "node1", null, true, ShardRoutingState.STARTED)); + + // Shard 2: Unassigned + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + + routingTableBuilder.add(indexRoutingTableBuilder.build()); + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + allocation.debugDecision(true); + + ShardRouting shard1 = routingTable.index("test").shard(0).primaryShard(); + ShardRouting shard2 = routingTable.index("test").shard(1).primaryShard(); + ShardRouting shard3 = routingTable.index("test").shard(2).primaryShard(); + + // Test allocation decisions + assertEquals(YES, decider.canAllocate(shard3, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canRemain(shard1, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canAllocate(shard3, clusterState.getRoutingNodes().node("node2"), allocation).type()); + } + + public 
void testClusterShardLimit() { + Settings settings = Settings.builder().put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 2).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ShardsLimitAllocationDecider decider = new ShardsLimitAllocationDecider(settings, clusterSettings); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(0)) + .build(); + + // Create a RoutingTable with shards 0 and 1 initialized on node1, and shard 2 unassigned + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test").getIndex()); + + // Shard 0 and 1: STARTED on node1 + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED)); + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 1, "node1", null, true, ShardRoutingState.STARTED)); + + // Shard 2: Unassigned + indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + + routingTableBuilder.add(indexRoutingTableBuilder.build()); + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + allocation.debugDecision(true); + + ShardRouting shard1 = routingTable.index("test").shard(0).primaryShard(); + ShardRouting shard2 = routingTable.index("test").shard(1).primaryShard(); + ShardRouting shard3 = routingTable.index("test").shard(2).primaryShard(); + + // Test allocation decisions + assertEquals(NO, decider.canAllocate(shard3, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canRemain(shard1, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canAllocate(shard3, clusterState.getRoutingNodes().node("node2"), allocation).type()); + } + + public void testClusterPrimaryShardLimit() { + Settings settings = Settings.builder() + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 2) + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 3) + .build(); + + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ShardsLimitAllocationDecider decider = new ShardsLimitAllocationDecider(settings, clusterSettings); + + // Create metadata for two indices + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(0)) + .put(IndexMetadata.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1)) + .build(); + + // Create routing table + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + + // Setup routing for test1 (3 primaries) + IndexRoutingTable.Builder test1RoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test1").getIndex()); + + // test1: First primary 
on node1 + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 0, "node1", null, true, ShardRoutingState.STARTED)); + + // test1: Second primary on node2 + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 1, "node2", null, true, ShardRoutingState.STARTED)); + + // test1: Third primary unassigned + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + + // Setup routing for test2 (2 primaries, 1 replica) + IndexRoutingTable.Builder test2RoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test2").getIndex()); + + // test2: First primary on node1 + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 0, "node1", null, true, ShardRoutingState.STARTED)); + + // test2: Second primary on node2 + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 1, "node2", null, true, ShardRoutingState.STARTED)); + + // test2: First replica on node2 + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 0, "node2", null, false, ShardRoutingState.STARTED)); + // test2: Second replica unassigned + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 1, null, null, false, ShardRoutingState.UNASSIGNED)); + + routingTableBuilder.add(test1RoutingTableBuilder.build()); + routingTableBuilder.add(test2RoutingTableBuilder.build()); + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + allocation.debugDecision(true); + + // Get shards for testing + ShardRouting test1Shard1 = routingTable.index("test1").shard(0).primaryShard(); + ShardRouting test1Shard3 = routingTable.index("test1").shard(2).primaryShard(); + ShardRouting test2Replica2 = routingTable.index("test2").shard(1).replicaShards().get(0); + + // Test allocation decisions + // Cannot allocate third primary to node1 (would exceed primary shard limit) + assertEquals(NO, decider.canAllocate(test1Shard3, clusterState.getRoutingNodes().node("node1"), allocation).type()); + + // Cannot allocate third primary to node2 (would exceed primary shard limit) + assertEquals(NO, decider.canAllocate(test1Shard3, clusterState.getRoutingNodes().node("node2"), allocation).type()); + + // Can allocate second replica to node1 (within total shard limit) + assertEquals(YES, decider.canAllocate(test2Replica2, clusterState.getRoutingNodes().node("node1"), allocation).type()); + + // Cannot allocate second replica to node2 (would exceed total shard limit) + assertEquals(NO, decider.canAllocate(test2Replica2, clusterState.getRoutingNodes().node("node2"), allocation).type()); + + // Existing primary can remain + assertEquals(YES, decider.canRemain(test1Shard1, clusterState.getRoutingNodes().node("node1"), allocation).type()); + + } + + public void testIndexShardLimit() { + Settings clusterSettings = Settings.builder() + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 2) + .build(); + ClusterSettings clusterSettingsObject = new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + 
ShardsLimitAllocationDecider decider = new ShardsLimitAllocationDecider(clusterSettings, clusterSettingsObject); + + // Create index settings with INDEX_TOTAL_SHARDS_PER_NODE_SETTING and version + Settings indexSettings = Settings.builder() + .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 1) // Set index-level limit to 1 + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test1").settings(indexSettings).numberOfShards(3).numberOfReplicas(0)) + .put(IndexMetadata.builder("test2").settings(indexSettings).numberOfShards(3).numberOfReplicas(0)) + .build(); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + + // Set up routing table for test1 + IndexRoutingTable.Builder test1RoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test1").getIndex()); + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 0, "node1", null, true, ShardRoutingState.STARTED)); + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 1, null, null, true, ShardRoutingState.UNASSIGNED)); + test1RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test1", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + routingTableBuilder.add(test1RoutingTableBuilder.build()); + + // Set up routing table for test2 + IndexRoutingTable.Builder test2RoutingTableBuilder = IndexRoutingTable.builder(metadata.index("test2").getIndex()); + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 0, "node2", null, true, ShardRoutingState.STARTED)); + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 1, null, null, true, ShardRoutingState.UNASSIGNED)); + test2RoutingTableBuilder.addShard(TestShardRouting.newShardRouting("test2", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + routingTableBuilder.add(test2RoutingTableBuilder.build()); + + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + allocation.debugDecision(true); + + // Test allocation decisions + ShardRouting test1Shard1 = routingTable.index("test1").shard(1).primaryShard(); + ShardRouting test1Shard2 = routingTable.index("test1").shard(2).primaryShard(); + ShardRouting test2Shard1 = routingTable.index("test2").shard(1).primaryShard(); + ShardRouting test2Shard2 = routingTable.index("test2").shard(2).primaryShard(); + + assertEquals(NO, decider.canAllocate(test1Shard2, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canRemain(test1Shard1, clusterState.getRoutingNodes().node("node1"), allocation).type()); + assertEquals(YES, decider.canAllocate(test1Shard2, clusterState.getRoutingNodes().node("node2"), allocation).type()); + assertEquals(NO, decider.canAllocate(test2Shard2, clusterState.getRoutingNodes().node("node2"), allocation).type()); + assertEquals(YES, decider.canRemain(test2Shard1, clusterState.getRoutingNodes().node("node2"), allocation).type()); + assertEquals(YES, decider.canAllocate(test2Shard2, clusterState.getRoutingNodes().node("node1"), 
allocation).type()); + } + + public void testIndexPrimaryShardLimit() { + Settings clusterSettings = Settings.builder() + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), -1) + .build(); + ClusterSettings clusterSettingsObject = new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ShardsLimitAllocationDecider decider = new ShardsLimitAllocationDecider(clusterSettings, clusterSettingsObject); + + // Create index settings for three indices + Settings indexSettingsTest1 = Settings.builder() + .put(ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT.toString()) + .build(); + + Settings indexSettingsTest2 = Settings.builder() + .put(ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 2) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + + Settings indexSettingsTest3 = Settings.builder() + .put(ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test1").settings(indexSettingsTest1).numberOfShards(3).numberOfReplicas(0)) + .put(IndexMetadata.builder("test2").settings(indexSettingsTest2).numberOfShards(3).numberOfReplicas(0)) + .put(IndexMetadata.builder("test3").settings(indexSettingsTest3).numberOfShards(3).numberOfReplicas(0)) + .build(); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + + // Set up routing table for test1 + IndexRoutingTable.Builder test1Builder = IndexRoutingTable.builder(metadata.index("test1").getIndex()); + test1Builder.addShard(TestShardRouting.newShardRouting("test1", 0, "node1", null, true, ShardRoutingState.STARTED)); + test1Builder.addShard(TestShardRouting.newShardRouting("test1", 1, "node2", null, true, ShardRoutingState.STARTED)); + test1Builder.addShard(TestShardRouting.newShardRouting("test1", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + routingTableBuilder.add(test1Builder.build()); + + // Set up routing table for test2 + IndexRoutingTable.Builder test2Builder = IndexRoutingTable.builder(metadata.index("test2").getIndex()); + test2Builder.addShard(TestShardRouting.newShardRouting("test2", 0, "node1", null, true, ShardRoutingState.STARTED)); + test2Builder.addShard(TestShardRouting.newShardRouting("test2", 1, "node2", null, true, ShardRoutingState.STARTED)); + test2Builder.addShard(TestShardRouting.newShardRouting("test2", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + routingTableBuilder.add(test2Builder.build()); + + // Set up routing table for test3 + IndexRoutingTable.Builder test3Builder = IndexRoutingTable.builder(metadata.index("test3").getIndex()); + test3Builder.addShard(TestShardRouting.newShardRouting("test3", 0, "node1", null, true, ShardRoutingState.STARTED)); + test3Builder.addShard(TestShardRouting.newShardRouting("test3", 1, "node2", null, true, ShardRoutingState.STARTED)); + test3Builder.addShard(TestShardRouting.newShardRouting("test3", 2, null, null, true, ShardRoutingState.UNASSIGNED)); + routingTableBuilder.add(test3Builder.build()); + + RoutingTable routingTable = routingTableBuilder.build(); + + ClusterState clusterState = 
ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + allocation.debugDecision(true); + + // Get unassigned shards for testing + ShardRouting test1Shard2 = routingTable.index("test1").shard(2).primaryShard(); + ShardRouting test2Shard2 = routingTable.index("test2").shard(2).primaryShard(); + ShardRouting test3Shard2 = routingTable.index("test3").shard(2).primaryShard(); + + // Test assertions + assertEquals(NO, decider.canAllocate(test1Shard2, clusterState.getRoutingNodes().node("node1"), allocation).type()); // Cannot + // assign 3rd + // shard of + // test1 to + // node1 + assertEquals(NO, decider.canAllocate(test3Shard2, clusterState.getRoutingNodes().node("node2"), allocation).type()); // Cannot + // assign 3rd + // shard of + // test3 to + // node2 + assertEquals(YES, decider.canAllocate(test2Shard2, clusterState.getRoutingNodes().node("node1"), allocation).type()); // Can assign + // 3rd shard + // of test2 to + // node1 + assertEquals(YES, decider.canAllocate(test2Shard2, clusterState.getRoutingNodes().node("node2"), allocation).type()); // Can assign + // 3rd shard + // of test2 to + // node2 + } + + private DiscoveryNode newNode(String nodeId) { + return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), Version.CURRENT); + } +} From a728cae0f4771fa19c60ee9d5794595d32b99bce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Feb 2025 22:43:15 -0500 Subject: [PATCH 027/550] Bump org.awaitility:awaitility from 4.2.2 to 4.3.0 in /server (#17439) * Bump org.awaitility:awaitility from 4.2.2 to 4.3.0 in /server Bumps [org.awaitility:awaitility](https://github.com/awaitility/awaitility) from 4.2.2 to 4.3.0. - [Changelog](https://github.com/awaitility/awaitility/blob/master/changelog.txt) - [Commits](https://github.com/awaitility/awaitility/compare/awaitility-4.2.2...awaitility-4.3.0) --- updated-dependencies: - dependency-name: org.awaitility:awaitility dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 2 +- server/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 428cb6a8073d6..70f8a48d19ba3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) ### Dependencies -- Bump `org.awaitility:awaitility` from 4.2.0 to 4.2.2 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230)) +- Bump `org.awaitility:awaitility` from 4.2.0 to 4.3.0 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230), [#17439](https://github.com/opensearch-project/OpenSearch/pull/17439)) - Bump `dnsjava:dnsjava` from 3.6.2 to 3.6.3 ([#17231](https://github.com/opensearch-project/OpenSearch/pull/17231)) - Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17229](https://github.com/opensearch-project/OpenSearch/pull/17229)) - Bump `org.jruby.joni:joni` from 2.2.1 to 2.2.3 ([#17136](https://github.com/opensearch-project/OpenSearch/pull/17136)) diff --git a/server/build.gradle b/server/build.gradle index cb64d6becb315..fd2cac4c7506f 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -114,7 +114,7 @@ dependencies { // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap api libs.roaringbitmap - testImplementation 'org.awaitility:awaitility:4.2.2' + testImplementation 'org.awaitility:awaitility:4.3.0' testImplementation(project(":test:framework")) { // tests use the locally compiled version of server exclude group: 'org.opensearch', module: 'server' From db43d0fd5d2bf044c79b17f928673ef6f7db7479 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Tue, 25 Feb 2025 00:18:19 -0500 Subject: [PATCH 028/550] Updated affiliation for @dblock. (#17450) Signed-off-by: Daniel (dB.) Doubrovkine --- MAINTAINERS.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 8a6890d1ca1c1..887ff654dff96 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -5,7 +5,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje ## Current Maintainers | Maintainer | GitHub ID | Affiliation | -|--------------------------|---------------------------------------------------------|-------------| +| ------------------------ | ------------------------------------------------------- | ----------- | | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | | Andrew Ross | [andrross](https://github.com/andrross) | Amazon | | Andriy Redko | [reta](https://github.com/reta) | Independent | @@ -15,7 +15,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon | | Craig Perkins | [cwperks](https://github.com/cwperks) | Amazon | | Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon | -| Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Amazon | +| Daniel "dB." 
Doubrovkine | [dblock](https://github.com/dblock) | Independent | | Binlong Gao | [gaobinlong](https://github.com/gaobinlong) | Amazon | | Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon | | Jay Deng | [jed326](https://github.com/jed326) | Amazon | @@ -35,14 +35,14 @@ This document contains a list of maintainers in this repo. See [opensearch-proje ## Emeritus -| Maintainer | GitHub ID | Affiliation | -| ---------------------- |-------------------------------------------- | ----------- | -| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | -| Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | -| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | -| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta | -| Himanshu Setia | [setiah](https://github.com/setiah) | Amazon | -| Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | -| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent | -| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | -| Suraj Singh | [dreamer-89](https://github.com/dreamer-89) | Amazon | +| Maintainer | GitHub ID | Affiliation | +| --------------------- | ------------------------------------------- | ----------- | +| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | +| Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | +| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | +| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta | +| Himanshu Setia | [setiah](https://github.com/setiah) | Amazon | +| Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | +| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent | +| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | +| Suraj Singh | [dreamer-89](https://github.com/dreamer-89) | Amazon | From 7e2d2437a14580e985440c140f06680dc4e3cd81 Mon Sep 17 00:00:00 2001 From: panguixin Date: Wed, 26 Feb 2025 00:00:59 +0800 Subject: [PATCH 029/550] Correct the isStored flag for wildcard field type (#17440) Signed-off-by: panguixin --- .../java/org/opensearch/index/mapper/WildcardFieldMapper.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java index 20c5ce87ad1c7..1132c245c6930 100644 --- a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java @@ -315,7 +315,7 @@ public WildcardFieldType(String name, Map meta) { } public WildcardFieldType(String name, NamedAnalyzer normalizer, Builder builder) { - super(name, true, true, builder.hasDocValues.getValue(), TextSearchInfo.SIMPLE_MATCH_ONLY, builder.meta.getValue()); + super(name, true, false, builder.hasDocValues.getValue(), TextSearchInfo.SIMPLE_MATCH_ONLY, builder.meta.getValue()); setIndexAnalyzer(normalizer); this.ignoreAbove = builder.ignoreAbove.getValue(); this.nullValue = builder.nullValue.getValue(); From e39790357453eca32da707106c22cbd1c8ec39d5 Mon Sep 17 00:00:00 2001 From: Lakshya Taragi <157457166+ltaragi@users.noreply.github.com> Date: Wed, 26 Feb 2025 05:00:27 +0530 Subject: [PATCH 030/550] Fix flaky tests from`SegmentReplicationAllocationIT` (#17429) * Fix flaky tests in SegmentReplicationAllocationIT Signed-off-by: Lakshya Taragi * Remove extra logs Signed-off-by: Lakshya Taragi * Account for replicas as well 
Signed-off-by: Lakshya Taragi * Reduce upper limit on no. of indices Signed-off-by: Lakshya Taragi * Only verified changes Signed-off-by: Lakshya Taragi * Fix testSingleIndexShardAllocation Signed-off-by: Lakshya Taragi --------- Signed-off-by: Lakshya Taragi --- .../SegmentReplicationAllocationIT.java | 29 +++++++++++++------ 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index 669e24f9fb555..0b2cf93903ed9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -25,6 +25,7 @@ import org.opensearch.test.junit.annotations.TestLogging; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -169,14 +170,16 @@ public void testSingleIndexShardAllocation() throws Exception { // Remove a node internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeNames.get(0))); - ensureGreen(TimeValue.timeValueSeconds(60)); + internalCluster().validateClusterFormed(); + ensureGreen(TimeValue.timeValueSeconds(100)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); logger.info(ShardAllocations.printShardDistribution(state)); verifyPerIndexPrimaryBalance(); // Add a new node internalCluster().startDataOnlyNode(); - ensureGreen(TimeValue.timeValueSeconds(60)); + internalCluster().validateClusterFormed(); + ensureGreen(TimeValue.timeValueSeconds(100)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); logger.info(ShardAllocations.printShardDistribution(state)); verifyPerIndexPrimaryBalance(); @@ -250,12 +253,21 @@ public void testAllocationAndRebalanceWithDisruption() throws Exception { internalCluster().startClusterManagerOnlyNode(); final int maxReplicaCount = 2; final int maxShardCount = 2; - // Create higher number of nodes than number of shards to reduce chances of SameShardAllocationDecider kicking-in + final int numberOfIndices = randomIntBetween(1, 3); + final int maxPossibleShards = numberOfIndices * maxShardCount * (1 + maxReplicaCount); + + List> shardAndReplicaCounts = new ArrayList<>(); + int shardCount, replicaCount, totalShards = 0; + for (int i = 0; i < numberOfIndices; i++) { + shardCount = randomIntBetween(1, maxShardCount); + replicaCount = randomIntBetween(1, maxReplicaCount); + shardAndReplicaCounts.add(Arrays.asList(shardCount, replicaCount)); + totalShards += shardCount * (1 + replicaCount); + } + // Create a strictly higher number of nodes than the number of shards to reduce chances of SameShardAllocationDecider kicking-in // and preventing primary relocations - final int nodeCount = randomIntBetween(5, 10); - final int numberOfIndices = randomIntBetween(1, 10); + final int nodeCount = randomIntBetween(totalShards, maxPossibleShards) + 1; final float buffer = randomIntBetween(1, 4) * 0.10f; - logger.info("--> Creating {} nodes", nodeCount); final List nodeNames = new ArrayList<>(); for (int i = 0; i < nodeCount; i++) { @@ -263,11 +275,10 @@ public void testAllocationAndRebalanceWithDisruption() throws Exception { } setAllocationRelocationStrategy(true, true, buffer); - int shardCount, replicaCount; ClusterState state; for (int 
i = 0; i < numberOfIndices; i++) { - shardCount = randomIntBetween(1, maxShardCount); - replicaCount = randomIntBetween(1, maxReplicaCount); + shardCount = shardAndReplicaCounts.get(i).get(0); + replicaCount = shardAndReplicaCounts.get(i).get(1); logger.info("--> Creating index test{} with primary {} and replica {}", i, shardCount, replicaCount); createIndex("test" + i, shardCount, replicaCount, i % 2 == 0); ensureGreen(TimeValue.timeValueSeconds(60)); From 171433c2ad253aa42b6773e290ac26f78fb02917 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Wed, 26 Feb 2025 18:49:21 +0530 Subject: [PATCH 031/550] Fix ConcurrentModificationException in RemoteFsTimestampAwareTranslog.trimUnreferencedReaders (#17028) * Fix ConcurrentModificationException in RemoteFsTimestampAwareTranslog.trimUnreferencedReaders Signed-off-by: Sachin Kale * Address PR comments Signed-off-by: Sachin Kale --------- Signed-off-by: Sachin Kale --- .../RemoteFsTimestampAwareTranslog.java | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java index 99153324b8372..427dbb690448f 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java @@ -125,20 +125,18 @@ protected void trimUnreferencedReaders(boolean indexDeleted, boolean trimLocal) } // Update file tracker to reflect local translog state - Optional minLiveGeneration = readers.stream().map(BaseTranslogReader::getGeneration).min(Long::compareTo); - if (minLiveGeneration.isPresent()) { - List staleFilesInTracker = new ArrayList<>(); - for (String file : fileTransferTracker.allUploaded()) { - if (file.endsWith(TRANSLOG_FILE_SUFFIX)) { - long generation = Translog.parseIdFromFileName(file); - if (generation < minLiveGeneration.get()) { - staleFilesInTracker.add(file); - staleFilesInTracker.add(Translog.getCommitCheckpointFileName(generation)); - } + long minLiveGeneration = getMinFileGeneration(); + List staleFilesInTracker = new ArrayList<>(); + for (String file : fileTransferTracker.allUploaded()) { + if (file.endsWith(TRANSLOG_FILE_SUFFIX)) { + long generation = Translog.parseIdFromFileName(file); + if (generation < minLiveGeneration) { + staleFilesInTracker.add(file); + staleFilesInTracker.add(Translog.getCommitCheckpointFileName(generation)); } - fileTransferTracker.delete(staleFilesInTracker); } } + fileTransferTracker.delete(staleFilesInTracker); // This is to ensure that after the permits are acquired during primary relocation, there are no further modification on remote // store. From 176a4f01dea48a9a5f149ba50c3d51fe6f11e9d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 26 Feb 2025 11:24:30 -0500 Subject: [PATCH 032/550] Bump com.netflix.nebula.ospackage-base from 11.10.1 to 11.11.1 in /distribution/packages (#17374) * Bump com.netflix.nebula.ospackage-base in /distribution/packages Bumps com.netflix.nebula.ospackage-base from 11.10.1 to 11.11.1. --- updated-dependencies: - dependency-name: com.netflix.nebula.ospackage-base dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Craig Perkins Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Craig Perkins --- CHANGELOG.md | 1 + distribution/packages/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 70f8a48d19ba3..e4779231977b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) - Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) +- Bump `com.netflix.nebula.ospackage-base` from 11.10.1 to 11.11.1 ([#17374](https://github.com/opensearch-project/OpenSearch/pull/17374)) ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index ada19dfa38e78..d3cecde24a35d 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.10.1" + id "com.netflix.nebula.ospackage-base" version "11.11.1" } void addProcessFilesTask(String type, boolean jdk) { From 0ffed5e8b743a075f5d66c4fe6e9b8371eacfa14 Mon Sep 17 00:00:00 2001 From: Iwan Igonin <83668556+beanuwave@users.noreply.github.com> Date: Wed, 26 Feb 2025 20:43:38 +0100 Subject: [PATCH 033/550] =?UTF-8?q?Use=20BC=20libraries=20to=20parse=20PEM?= =?UTF-8?q?=20files,=20increase=20key=20length,=20allow=20gener=E2=80=A6?= =?UTF-8?q?=20(#17393)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use BC libraries to parse PEM files, increase key length, allow general use of known cryptographic binary extensions, remove unused BC dependencies Signed-off-by: Igonin * remove duplicated test permission Signed-off-by: Igonin --------- Signed-off-by: Igonin Co-authored-by: Igonin --- CHANGELOG-3.0.md | 1 + buildSrc/build.gradle | 4 - .../precommit/ForbiddenPatternsTask.java | 5 + .../gradle/testclusters/OpenSearchNode.java | 11 +- client/rest/build.gradle | 4 - distribution/tools/plugin-cli/build.gradle | 4 +- libs/ssl-config/build.gradle | 11 +- .../licenses/bcpkix-jdk18on-1.78.jar.sha1 | 0 .../licenses/bcprov-jdk18on-1.78.jar.sha1 | 0 .../licenses/bcutil-jdk18on-1.78.jar.sha1 | 1 + .../licenses/bouncycastle-LICENSE.txt | 14 + .../licenses/bouncycastle-NOTICE.txt | 1 + .../opensearch/common/ssl/PemKeyConfig.java | 4 +- .../org/opensearch/common/ssl/PemUtils.java | 658 +++--------------- .../common/ssl/SslConfiguration.java | 8 +- .../common/ssl/SslConfigurationLoader.java | 9 +- .../common/ssl/PemKeyConfigTests.java | 15 +- .../common/ssl/PemTrustConfigTests.java | 11 +- .../opensearch/common/ssl/PemUtilsTests.java | 95 ++- .../ssl/SslConfigurationLoaderTests.java | 5 +- .../common/ssl/SslDiagnosticsTests.java | 24 +- .../common/ssl/StoreKeyConfigTests.java | 37 +- .../common/ssl/StoreTrustConfigTests.java | 31 +- .../src/test/resources/certs/README.md | 155 
+++++ .../src/test/resources/certs/README.txt | 85 --- .../test/resources/certs/cert-all/certs.p12 | Bin 4757 -> 4895 bytes .../test/resources/certs/cert-all/empty.jks | Bin 0 -> 32 bytes .../resources/certs/cert1/cert1-pkcs1.crt | 19 + .../resources/certs/cert1/cert1-pkcs1.key | 27 + .../resources/certs/cert1/cert1-pkcs8.key | 28 - .../src/test/resources/certs/cert1/cert1.crt | 34 +- .../src/test/resources/certs/cert1/cert1.key | 55 +- .../src/test/resources/certs/cert1/cert1.p12 | Bin 2456 -> 2606 bytes .../resources/certs/cert2/cert2-pkcs1.crt | 19 + .../resources/certs/cert2/cert2-pkcs1.key | 30 + .../resources/certs/cert2/cert2-pkcs8.key | 29 - .../src/test/resources/certs/cert2/cert2.crt | 34 +- .../src/test/resources/certs/cert2/cert2.key | 60 +- .../src/test/resources/certs/cert2/cert2.p12 | Bin 2456 -> 2606 bytes .../test/resources/certs/pem-utils/README.md | 108 ++- .../pem-utils/dsa_key_openssl_encrypted.pem | 30 +- .../certs/pem-utils/dsa_key_openssl_plain.pem | 28 +- .../dsa_key_openssl_plain_with_params.pem | 28 +- .../pem-utils/dsa_key_pkcs8_encrypted.pem | 18 + .../certs/pem-utils/dsa_key_pkcs8_plain.pem | 20 +- .../pem-utils/ec_key_openssl_encrypted.pem | 7 +- .../certs/pem-utils/ec_key_openssl_plain.pem | 5 +- .../ec_key_openssl_plain_with_params.pem | 7 +- .../pem-utils/ec_key_pkcs8_encrypted.pem | 6 + .../certs/pem-utils/key_DSA_enc_pbkdf2.pem | 18 + .../certs/pem-utils/key_EC_enc_pbkdf2.pem | 6 + .../certs/pem-utils/key_PKCS8_enc_pbkdf2.pem | 30 + .../resources/certs/pem-utils/testnode.jks | Bin 9360 -> 15253 bytes modules/reindex/build.gradle | 5 - .../reindex/ReindexRestClientSslTests.java | 10 +- .../org/opensearch/index/reindex/README.md | 48 ++ .../org/opensearch/index/reindex/README.txt | 16 - .../org/opensearch/index/reindex/ca.key | 30 + .../org/opensearch/index/reindex/ca.pem | 43 +- .../index/reindex/client/client.crt | 35 +- .../index/reindex/client/client.key | 60 +- .../opensearch/index/reindex/http/http.crt | 38 +- .../opensearch/index/reindex/http/http.key | 60 +- .../SecureNetty4HttpServerTransportTests.java | 2 +- .../ssl/SimpleSecureNetty4TransportTests.java | 2 +- .../src/test/resources/README.md | 26 + .../src/test/resources/README.txt | 17 - .../src/test/resources/netty4-secure.jks | Bin 2790 -> 2790 bytes .../src/test/resources/netty4-secure.p12 | Bin 0 -> 2790 bytes .../AzureDiscoveryClusterFormationTests.java | 4 +- plugins/ingest-attachment/build.gradle | 3 - .../licenses/bcmail-jdk18on-1.78.jar.sha1 | 1 - .../licenses/bcmail-jdk18on-LICENSE.txt | 23 - .../licenses/bcmail-jdk18on-NOTICE.txt | 0 .../licenses/bcpkix-jdk18on-LICENSE.txt | 23 - .../licenses/bcpkix-jdk18on-NOTICE.txt | 0 .../licenses/bcprov-jdk18on-LICENSE.txt | 22 - .../licenses/bcprov-jdk18on-NOTICE.txt | 0 plugins/repository-gcs/build.gradle | 2 +- .../gcs/GoogleCloudStorageServiceTests.java | 2 +- .../repositories/gcs/TestUtils.java | 2 +- ...ReactorNetty4HttpServerTransportTests.java | 16 +- .../src/test/resources/README.txt | 14 - .../src/test/resources/certificate.crt | 22 - .../src/test/resources/certificate.key | 28 - qa/evil-tests/build.gradle | 4 +- .../org/opensearch/bootstrap/test.policy | 21 +- .../resources/provision/kdc.conf.template | 7 +- .../resources/provision/krb5.conf.template | 13 +- test/framework/build.gradle | 3 + .../licenses/bcpkix-jdk18on-1.78.jar.sha1 | 1 + .../licenses/bcprov-jdk18on-1.78.jar.sha1 | 1 + .../licenses/bouncycastle-LICENSE.txt | 14 + .../licenses/bouncycastle-NOTICE.txt | 1 + .../org/opensearch/test/KeyStoreUtils.java | 68 ++ 95 files 
changed, 1214 insertions(+), 1312 deletions(-) rename {plugins/ingest-attachment => libs/ssl-config}/licenses/bcpkix-jdk18on-1.78.jar.sha1 (100%) rename {plugins/ingest-attachment => libs/ssl-config}/licenses/bcprov-jdk18on-1.78.jar.sha1 (100%) create mode 100644 libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 create mode 100644 libs/ssl-config/licenses/bouncycastle-LICENSE.txt create mode 100644 libs/ssl-config/licenses/bouncycastle-NOTICE.txt create mode 100644 libs/ssl-config/src/test/resources/certs/README.md delete mode 100644 libs/ssl-config/src/test/resources/certs/README.txt create mode 100644 libs/ssl-config/src/test/resources/certs/cert-all/empty.jks create mode 100644 libs/ssl-config/src/test/resources/certs/cert1/cert1-pkcs1.crt create mode 100644 libs/ssl-config/src/test/resources/certs/cert1/cert1-pkcs1.key delete mode 100644 libs/ssl-config/src/test/resources/certs/cert1/cert1-pkcs8.key create mode 100644 libs/ssl-config/src/test/resources/certs/cert2/cert2-pkcs1.crt create mode 100644 libs/ssl-config/src/test/resources/certs/cert2/cert2-pkcs1.key delete mode 100644 libs/ssl-config/src/test/resources/certs/cert2/cert2-pkcs8.key create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_encrypted.pem create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_pkcs8_encrypted.pem create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/key_DSA_enc_pbkdf2.pem create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/key_EC_enc_pbkdf2.pem create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/key_PKCS8_enc_pbkdf2.pem create mode 100644 modules/reindex/src/test/resources/org/opensearch/index/reindex/README.md delete mode 100644 modules/reindex/src/test/resources/org/opensearch/index/reindex/README.txt create mode 100644 modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.key create mode 100644 modules/transport-netty4/src/test/resources/README.md delete mode 100644 modules/transport-netty4/src/test/resources/README.txt create mode 100644 modules/transport-netty4/src/test/resources/netty4-secure.p12 delete mode 100644 plugins/ingest-attachment/licenses/bcmail-jdk18on-1.78.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/bcmail-jdk18on-LICENSE.txt delete mode 100644 plugins/ingest-attachment/licenses/bcmail-jdk18on-NOTICE.txt delete mode 100644 plugins/ingest-attachment/licenses/bcpkix-jdk18on-LICENSE.txt delete mode 100644 plugins/ingest-attachment/licenses/bcpkix-jdk18on-NOTICE.txt delete mode 100644 plugins/ingest-attachment/licenses/bcprov-jdk18on-LICENSE.txt delete mode 100644 plugins/ingest-attachment/licenses/bcprov-jdk18on-NOTICE.txt delete mode 100644 plugins/transport-reactor-netty4/src/test/resources/README.txt delete mode 100644 plugins/transport-reactor-netty4/src/test/resources/certificate.crt delete mode 100644 plugins/transport-reactor-netty4/src/test/resources/certificate.key create mode 100644 test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 create mode 100644 test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 create mode 100644 test/framework/licenses/bouncycastle-LICENSE.txt create mode 100644 test/framework/licenses/bouncycastle-NOTICE.txt create mode 100644 test/framework/src/main/java/org/opensearch/test/KeyStoreUtils.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index e4ae38e8da2ae..4c366d0c7714f 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -44,6 +44,7 @@ The format is based on [Keep a 
Changelog](https://keepachangelog.com/en/1.0.0/), - Refactor the `:server` module `org.opensearch.client` to `org.opensearch.transport.client` to eliminate top level split packages for JPMS support ([#17272](https://github.com/opensearch-project/OpenSearch/pull/17272)) - Use Lucene `BM25Similarity` as default since the `LegacyBM25Similarity` is marked as deprecated ([#17306](https://github.com/opensearch-project/OpenSearch/pull/17306)) - Wildcard field index only 3gram of the input data [#17349](https://github.com/opensearch-project/OpenSearch/pull/17349) +- Use BC libraries to parse PEM files, increase key length, allow general use of known cryptographic binary extensions, remove unused BC dependencies ([#3420](https://github.com/opensearch-project/OpenSearch/pull/14912)) ### Deprecated diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 6e30bb0199086..65986f2361c9d 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -229,12 +229,8 @@ if (project != rootProject) { forbiddenPatterns { exclude '**/*.wav' - exclude '**/*.p12' - exclude '**/*.jks' - exclude '**/*.crt' // the file that actually defines nocommit exclude '**/ForbiddenPatternsTask.java' - exclude '**/*.bcfks' } testingConventions { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java index 1790b32fb2f36..fbf96483443ee 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java @@ -83,8 +83,13 @@ public class ForbiddenPatternsTask extends DefaultTask { .exclude("**/*.ico") .exclude("**/*.jar") .exclude("**/*.zip") + .exclude("**/*.p12") .exclude("**/*.jks") .exclude("**/*.crt") + .exclude("**/*.der") + .exclude("**/*.pem") + .exclude("**/*.key") + .exclude("**/*.bcfks") .exclude("**/*.keystore") .exclude("**/*.png"); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java index aaa2daef2a158..c7af3d0a155f7 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java @@ -548,7 +548,7 @@ public synchronized void start() { logToProcessStdout("Creating opensearch keystore with password set to [" + keystorePassword + "]"); if (keystorePassword.length() > 0) { - runOpenSearchBinScriptWithInput(keystorePassword + "\n" + keystorePassword, "opensearch-keystore", "create", "-p"); + runOpenSearchBinScriptWithInput(keystorePassword + "\n" + keystorePassword + "\n", "opensearch-keystore", "create", "-p"); } else { runOpenSearchBinScript("opensearch-keystore", "-v", "create"); } @@ -556,7 +556,7 @@ public synchronized void start() { if (keystoreSettings.isEmpty() == false || keystoreFiles.isEmpty() == false) { logToProcessStdout("Adding " + keystoreSettings.size() + " keystore settings and " + keystoreFiles.size() + " keystore files"); - keystoreSettings.forEach((key, value) -> runKeystoreCommandWithPassword(keystorePassword, value.toString(), "add", "-x", key)); + keystoreSettings.forEach((key, value) -> runKeystoreCommandWithPassword(keystorePassword, value.toString(), "add", key)); for (Map.Entry entry : keystoreFiles.entrySet()) { File file = entry.getValue(); @@ -738,7 +738,12 @@ private void runOpenSearchBinScriptWithInput(String input, String 
tool, CharSequ } private void runKeystoreCommandWithPassword(String keystorePassword, String input, CharSequence... args) { - final String actualInput = keystorePassword.length() > 0 ? keystorePassword + "\n" + input : input; + final String actualInput; + if (keystorePassword.length() > 0) { + actualInput = keystorePassword + "\n" + input + "\n" + input; + } else { + actualInput = input + "\n" + input; + } runOpenSearchBinScriptWithInput(actualInput, "opensearch-keystore", args); } diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 93faf0024b51e..29d76e6910ee3 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -75,10 +75,6 @@ tasks.withType(CheckForbiddenApis).configureEach { replaceSignatureFiles('jdk-signatures', 'http-signatures') } -forbiddenPatterns { - exclude '**/*.der' -} - tasks.named('forbiddenApisTest').configure { //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage bundledSignatures -= 'jdk-non-portable' diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 784cdc457a1a9..ecb86ecb1eb0b 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -39,7 +39,9 @@ dependencies { compileOnly project(":libs:opensearch-cli") api "org.bouncycastle:bcpg-fips:2.0.9" api "org.bouncycastle:bc-fips:2.0.0" - testImplementation project(":test:framework") + testImplementation(project(":test:framework")) { + exclude group: 'org.bouncycastle' + } testImplementation 'com.google.jimfs:jimfs:1.3.0' testRuntimeOnly("com.google.guava:guava:${versions.guava}") { transitive = false diff --git a/libs/ssl-config/build.gradle b/libs/ssl-config/build.gradle index 3226ec12ff6f7..da0829cb533da 100644 --- a/libs/ssl-config/build.gradle +++ b/libs/ssl-config/build.gradle @@ -34,6 +34,9 @@ apply plugin: "opensearch.publish" dependencies { api project(':libs:opensearch-common') + api "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk18on:${versions.bouncycastle}" + runtimeOnly "org.bouncycastle:bcutil-jdk18on:${versions.bouncycastle}" testImplementation(project(":test:framework")) { exclude group: 'org.opensearch', module: 'opensearch-ssl-config' @@ -44,16 +47,12 @@ dependencies { testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" } - tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' } -forbiddenPatterns { - exclude '**/*.key' - exclude '**/*.pem' - exclude '**/*.p12' - exclude '**/*.jks' +tasks.named("dependencyLicenses").configure { + mapping from: /bc.*/, to: 'bouncycastle' } tasks.test { diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 b/libs/ssl-config/licenses/bcpkix-jdk18on-1.78.jar.sha1 similarity index 100% rename from plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 rename to libs/ssl-config/licenses/bcpkix-jdk18on-1.78.jar.sha1 diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 b/libs/ssl-config/licenses/bcprov-jdk18on-1.78.jar.sha1 similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 rename to libs/ssl-config/licenses/bcprov-jdk18on-1.78.jar.sha1 diff --git a/libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 b/libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..9c88eef3ace17 --- /dev/null +++ b/libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ 
+81c1f5e06f206be5dad137d563609dbe66c81d31 \ No newline at end of file diff --git a/libs/ssl-config/licenses/bouncycastle-LICENSE.txt b/libs/ssl-config/licenses/bouncycastle-LICENSE.txt new file mode 100644 index 0000000000000..5c7c14696849d --- /dev/null +++ b/libs/ssl-config/licenses/bouncycastle-LICENSE.txt @@ -0,0 +1,14 @@ +Copyright (c) 2000 - 2023 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/libs/ssl-config/licenses/bouncycastle-NOTICE.txt b/libs/ssl-config/licenses/bouncycastle-NOTICE.txt new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/libs/ssl-config/licenses/bouncycastle-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java index bfc29a5801b11..d957ffa457149 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java @@ -32,6 +32,8 @@ package org.opensearch.common.ssl; +import org.bouncycastle.pkcs.PKCSException; + import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.X509ExtendedKeyManager; @@ -91,7 +93,7 @@ private PrivateKey getPrivateKey() { throw new SslConfigException("the configured ssl private key file [" + key.toAbsolutePath() + "] does not exist", e); } catch (IOException e) { throw new SslConfigException("the configured ssl private key file [" + key.toAbsolutePath() + "] cannot be read", e); - } catch (GeneralSecurityException e) { + } catch (PKCSException e) { throw new SslConfigException("cannot load ssl private key file [" + key.toAbsolutePath() + "]", e); } } diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java index 8a3730ee554f9..441e17b808feb 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java @@ -32,628 +32,136 @@ package org.opensearch.common.ssl; -import org.opensearch.common.CharArrays; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; +import org.bouncycastle.asn1.pkcs.PrivateKeyInfo; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.openssl.PEMEncryptedKeyPair; +import org.bouncycastle.openssl.PEMKeyPair; +import org.bouncycastle.openssl.PEMParser; +import 
org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter; +import org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder; +import org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo; +import org.bouncycastle.pkcs.PKCSException; +import org.bouncycastle.pkcs.jcajce.JcePKCSPBEInputDecryptorProviderBuilder; -import javax.crypto.Cipher; -import javax.crypto.EncryptedPrivateKeyInfo; -import javax.crypto.SecretKey; -import javax.crypto.SecretKeyFactory; -import javax.crypto.spec.IvParameterSpec; -import javax.crypto.spec.PBEKeySpec; -import javax.crypto.spec.SecretKeySpec; - -import java.io.BufferedReader; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; -import java.math.BigInteger; import java.nio.charset.StandardCharsets; import java.nio.file.Files; -import java.nio.file.NoSuchFileException; import java.nio.file.Path; -import java.security.GeneralSecurityException; -import java.security.KeyFactory; -import java.security.KeyPairGenerator; -import java.security.MessageDigest; import java.security.PrivateKey; +import java.security.Provider; import java.security.cert.Certificate; import java.security.cert.CertificateException; import java.security.cert.CertificateFactory; -import java.security.interfaces.ECKey; -import java.security.spec.AlgorithmParameterSpec; -import java.security.spec.DSAPrivateKeySpec; -import java.security.spec.ECGenParameterSpec; -import java.security.spec.ECParameterSpec; -import java.security.spec.ECPrivateKeySpec; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.RSAPrivateCrtKeySpec; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Base64; import java.util.Collection; -import java.util.HashMap; import java.util.List; -import java.util.Map; +import java.util.Locale; import java.util.function.Supplier; final class PemUtils { - private static final String PKCS1_HEADER = "-----BEGIN RSA PRIVATE KEY-----"; - private static final String PKCS1_FOOTER = "-----END RSA PRIVATE KEY-----"; - private static final String OPENSSL_DSA_HEADER = "-----BEGIN DSA PRIVATE KEY-----"; - private static final String OPENSSL_DSA_FOOTER = "-----END DSA PRIVATE KEY-----"; - private static final String OPENSSL_DSA_PARAMS_HEADER = "-----BEGIN DSA PARAMETERS-----"; - private static final String OPENSSL_DSA_PARAMS_FOOTER = "-----END DSA PARAMETERS-----"; - private static final String PKCS8_HEADER = "-----BEGIN PRIVATE KEY-----"; - private static final String PKCS8_FOOTER = "-----END PRIVATE KEY-----"; - private static final String PKCS8_ENCRYPTED_HEADER = "-----BEGIN ENCRYPTED PRIVATE KEY-----"; - private static final String PKCS8_ENCRYPTED_FOOTER = "-----END ENCRYPTED PRIVATE KEY-----"; - private static final String OPENSSL_EC_HEADER = "-----BEGIN EC PRIVATE KEY-----"; - private static final String OPENSSL_EC_FOOTER = "-----END EC PRIVATE KEY-----"; - private static final String OPENSSL_EC_PARAMS_HEADER = "-----BEGIN EC PARAMETERS-----"; - private static final String OPENSSL_EC_PARAMS_FOOTER = "-----END EC PARAMETERS-----"; - private static final String HEADER = "-----BEGIN"; + private static final Provider BC = new BouncyCastleProvider(); - private PemUtils() { + PemUtils() { throw new IllegalStateException("Utility class should not be instantiated"); } /** * Creates a {@link PrivateKey} from the contents of a file. Supports PKCS#1, PKCS#8 - * encoded formats of encrypted and plaintext RSA, DSA and EC(secp256r1) keys + * encoded formats of encrypted and plaintext RSA, DSA and EC(secp256r1) keys. 
* * @param keyPath the path for the key file - * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key + * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key. Unencrypted keys ignore this value. * @return a private key from the contents of the file */ - public static PrivateKey readPrivateKey(Path keyPath, Supplier passwordSupplier) throws IOException, GeneralSecurityException { - try (BufferedReader bReader = Files.newBufferedReader(keyPath, StandardCharsets.UTF_8)) { - String line = bReader.readLine(); - while (null != line && line.startsWith(HEADER) == false) { - line = bReader.readLine(); - } - if (null == line) { - throw new SslConfigException("Error parsing Private Key [" + keyPath.toAbsolutePath() + "], file is empty"); - } - if (PKCS8_ENCRYPTED_HEADER.equals(line.trim())) { - char[] password = passwordSupplier.get(); - if (password == null) { - throw new SslConfigException("cannot read encrypted key [" + keyPath.toAbsolutePath() + "] without a password"); - } - return parsePKCS8Encrypted(bReader, password); - } else if (PKCS8_HEADER.equals(line.trim())) { - return parsePKCS8(bReader); - } else if (PKCS1_HEADER.equals(line.trim())) { - return parsePKCS1Rsa(bReader, passwordSupplier); - } else if (OPENSSL_DSA_HEADER.equals(line.trim())) { - return parseOpenSslDsa(bReader, passwordSupplier); - } else if (OPENSSL_DSA_PARAMS_HEADER.equals(line.trim())) { - return parseOpenSslDsa(removeDsaHeaders(bReader), passwordSupplier); - } else if (OPENSSL_EC_HEADER.equals(line.trim())) { - return parseOpenSslEC(bReader, passwordSupplier); - } else if (OPENSSL_EC_PARAMS_HEADER.equals(line.trim())) { - return parseOpenSslEC(removeECHeaders(bReader), passwordSupplier); - } else { - throw new SslConfigException( - "error parsing Private Key [" + keyPath.toAbsolutePath() + "], file does not contain a supported key format" - ); - } - } catch (FileNotFoundException | NoSuchFileException e) { - throw new SslConfigException("private key file [" + keyPath.toAbsolutePath() + "] does not exist", e); - } catch (IOException | GeneralSecurityException e) { - throw new SslConfigException("private key file [" + keyPath.toAbsolutePath() + "] cannot be parsed", e); - } - } - - /** - * Removes the EC Headers that OpenSSL adds to EC private keys as the information in them - * is redundant - * - * @throws IOException if the EC Parameter footer is missing - */ - private static BufferedReader removeECHeaders(BufferedReader bReader) throws IOException { - String line = bReader.readLine(); - while (line != null) { - if (OPENSSL_EC_PARAMS_FOOTER.equals(line.trim())) { - break; - } - line = bReader.readLine(); - } - if (null == line || OPENSSL_EC_PARAMS_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, EC Parameters footer is missing"); - } - // Verify that the key starts with the correct header before passing it to parseOpenSslEC - if (OPENSSL_EC_HEADER.equals(bReader.readLine()) == false) { - throw new IOException("Malformed PEM file, EC Key header is missing"); - } - return bReader; - } - - /** - * Removes the DSA Params Headers that OpenSSL adds to DSA private keys as the information in them - * is redundant - * - * @throws IOException if the EC Parameter footer is missing - */ - private static BufferedReader removeDsaHeaders(BufferedReader bReader) throws IOException { - String line = bReader.readLine(); - while (line != null) { - if (OPENSSL_DSA_PARAMS_FOOTER.equals(line.trim())) { - break; - } 
- line = bReader.readLine(); - } - if (null == line || OPENSSL_DSA_PARAMS_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, DSA Parameters footer is missing"); - } - // Verify that the key starts with the correct header before passing it to parseOpenSslDsa - if (OPENSSL_DSA_HEADER.equals(bReader.readLine()) == false) { - throw new IOException("Malformed PEM file, DSA Key header is missing"); - } - return bReader; - } - - /** - * Creates a {@link PrivateKey} from the contents of {@code bReader} that contains an plaintext private key encoded in - * PKCS#8 - * - * @param bReader the {@link BufferedReader} containing the key file contents - * @return {@link PrivateKey} - * @throws IOException if the file can't be read - * @throws GeneralSecurityException if the private key can't be generated from the {@link PKCS8EncodedKeySpec} - */ - private static PrivateKey parsePKCS8(BufferedReader bReader) throws IOException, GeneralSecurityException { - StringBuilder sb = new StringBuilder(); - String line = bReader.readLine(); - while (line != null) { - if (PKCS8_FOOTER.equals(line.trim())) { - break; - } - sb.append(line.trim()); - line = bReader.readLine(); - } - if (null == line || PKCS8_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); - } - byte[] keyBytes = Base64.getDecoder().decode(sb.toString()); - String keyAlgo = getKeyAlgorithmIdentifier(keyBytes); - KeyFactory keyFactory = KeyFactory.getInstance(keyAlgo); - return keyFactory.generatePrivate(new PKCS8EncodedKeySpec(keyBytes)); + public static PrivateKey readPrivateKey(Path keyPath, Supplier passwordSupplier) throws IOException, PKCSException { + PrivateKeyInfo pki = loadPrivateKeyFromFile(keyPath, passwordSupplier); + JcaPEMKeyConverter converter = new JcaPEMKeyConverter(); + return converter.getPrivateKey(pki); } - /** - * Creates a {@link PrivateKey} from the contents of {@code bReader} that contains an EC private key encoded in - * OpenSSL traditional format. 
- * - * @param bReader the {@link BufferedReader} containing the key file contents - * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key - * @return {@link PrivateKey} - * @throws IOException if the file can't be read - * @throws GeneralSecurityException if the private key can't be generated from the {@link ECPrivateKeySpec} - */ - private static PrivateKey parseOpenSslEC(BufferedReader bReader, Supplier passwordSupplier) throws IOException, - GeneralSecurityException { - StringBuilder sb = new StringBuilder(); - String line = bReader.readLine(); - Map pemHeaders = new HashMap<>(); - while (line != null) { - if (OPENSSL_EC_FOOTER.equals(line.trim())) { - break; - } - // Parse PEM headers according to https://www.ietf.org/rfc/rfc1421.txt - if (line.contains(":")) { - String[] header = line.split(":"); - pemHeaders.put(header[0].trim(), header[1].trim()); - } else { - sb.append(line.trim()); - } - line = bReader.readLine(); - } - if (null == line || OPENSSL_EC_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); - } - byte[] keyBytes = possiblyDecryptPKCS1Key(pemHeaders, sb.toString(), passwordSupplier); - KeyFactory keyFactory = KeyFactory.getInstance("EC"); - ECPrivateKeySpec ecSpec = parseEcDer(keyBytes); - return keyFactory.generatePrivate(ecSpec); - } - - /** - * Creates a {@link PrivateKey} from the contents of {@code bReader} that contains an RSA private key encoded in - * OpenSSL traditional format. - * - * @param bReader the {@link BufferedReader} containing the key file contents - * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key - * @return {@link PrivateKey} - * @throws IOException if the file can't be read - * @throws GeneralSecurityException if the private key can't be generated from the {@link RSAPrivateCrtKeySpec} - */ - private static PrivateKey parsePKCS1Rsa(BufferedReader bReader, Supplier passwordSupplier) throws IOException, - GeneralSecurityException { - StringBuilder sb = new StringBuilder(); - String line = bReader.readLine(); - Map pemHeaders = new HashMap<>(); - - while (line != null) { - if (PKCS1_FOOTER.equals(line.trim())) { - // Unencrypted - break; - } - // Parse PEM headers according to https://www.ietf.org/rfc/rfc1421.txt - if (line.contains(":")) { - String[] header = line.split(":"); - pemHeaders.put(header[0].trim(), header[1].trim()); - } else { - sb.append(line.trim()); + static List readCertificates(Collection certPaths) throws CertificateException, IOException { + CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); + List certificates = new ArrayList<>(certPaths.size()); + for (Path path : certPaths) { + try (InputStream input = Files.newInputStream(path)) { + final Collection parsed = certFactory.generateCertificates(input); + if (parsed.isEmpty()) { + throw new SslConfigException("Failed to parse any certificate from [" + path.toAbsolutePath() + "]"); + } + certificates.addAll(parsed); } - line = bReader.readLine(); } - if (null == line || PKCS1_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); - } - byte[] keyBytes = possiblyDecryptPKCS1Key(pemHeaders, sb.toString(), passwordSupplier); - RSAPrivateCrtKeySpec spec = parseRsaDer(keyBytes); - KeyFactory keyFactory = KeyFactory.getInstance("RSA"); - return keyFactory.generatePrivate(spec); + return certificates; } /** - * Creates a {@link 
PrivateKey} from the contents of {@code bReader} that contains an DSA private key encoded in - * OpenSSL traditional format. + * Creates a {@link PrivateKey} from the private key, with or without encryption. + * When enforcing the approved-only mode in Java security settings, some functionalities might be restricted due to the limited + * set of allowed algorithms. One such restriction includes Password Based Key Derivation Functions (PBKDF) like those used by OpenSSL + * and PKCS#12 formats. Since these formats rely on PBKDF algorithms, they cannot operate correctly within the approved-only mode. + * Consequently, attempting to utilize them could result in a {@link java.security.NoSuchAlgorithmException}. * - * @param bReader the {@link BufferedReader} containing the key file contents - * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key + * @param passwordSupplier The password supplier for the encrypted (password protected) key * @return {@link PrivateKey} - * @throws IOException if the file can't be read - * @throws GeneralSecurityException if the private key can't be generated from the {@link DSAPrivateKeySpec} - */ - private static PrivateKey parseOpenSslDsa(BufferedReader bReader, Supplier passwordSupplier) throws IOException, - GeneralSecurityException { - StringBuilder sb = new StringBuilder(); - String line = bReader.readLine(); - Map pemHeaders = new HashMap<>(); - - while (line != null) { - if (OPENSSL_DSA_FOOTER.equals(line.trim())) { - // Unencrypted - break; - } - // Parse PEM headers according to https://www.ietf.org/rfc/rfc1421.txt - if (line.contains(":")) { - String[] header = line.split(":"); - pemHeaders.put(header[0].trim(), header[1].trim()); + * @throws IOException If the file can't be read + */ + private static PrivateKeyInfo loadPrivateKeyFromFile(Path keyPath, Supplier passwordSupplier) throws IOException, + PKCSException { + + try (PEMParser pemParser = new PEMParser(Files.newBufferedReader(keyPath, StandardCharsets.UTF_8))) { + Object object = readObject(keyPath, pemParser); + + if (object instanceof PKCS8EncryptedPrivateKeyInfo) { // encrypted private key in pkcs8-format + var privateKeyInfo = (PKCS8EncryptedPrivateKeyInfo) object; + var inputDecryptorProvider = new JcePKCSPBEInputDecryptorProviderBuilder().setProvider(BC).build(passwordSupplier.get()); + return privateKeyInfo.decryptPrivateKeyInfo(inputDecryptorProvider); + } else if (object instanceof PEMEncryptedKeyPair) { // encrypted private key + var encryptedKeyPair = (PEMEncryptedKeyPair) object; + var decryptorProvider = new JcePEMDecryptorProviderBuilder().setProvider(BC).build(passwordSupplier.get()); + var keyPair = encryptedKeyPair.decryptKeyPair(decryptorProvider); + return keyPair.getPrivateKeyInfo(); + } else if (object instanceof PEMKeyPair) { // unencrypted private key + return ((PEMKeyPair) object).getPrivateKeyInfo(); + } else if (object instanceof PrivateKeyInfo) { // unencrypted private key in pkcs8-format + return (PrivateKeyInfo) object; } else { - sb.append(line.trim()); - } - line = bReader.readLine(); - } - if (null == line || OPENSSL_DSA_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); - } - byte[] keyBytes = possiblyDecryptPKCS1Key(pemHeaders, sb.toString(), passwordSupplier); - DSAPrivateKeySpec spec = parseDsaDer(keyBytes); - KeyFactory keyFactory = KeyFactory.getInstance("DSA"); - return keyFactory.generatePrivate(spec); - } - - /** - * Creates a {@link 
PrivateKey} from the contents of {@code bReader} that contains an encrypted private key encoded in - * PKCS#8 - * - * @param bReader the {@link BufferedReader} containing the key file contents - * @param keyPassword The password for the encrypted (password protected) key - * @return {@link PrivateKey} - * @throws IOException if the file can't be read - * @throws GeneralSecurityException if the private key can't be generated from the {@link PKCS8EncodedKeySpec} - */ - private static PrivateKey parsePKCS8Encrypted(BufferedReader bReader, char[] keyPassword) throws IOException, GeneralSecurityException { - StringBuilder sb = new StringBuilder(); - String line = bReader.readLine(); - while (line != null) { - if (PKCS8_ENCRYPTED_FOOTER.equals(line.trim())) { - break; - } - sb.append(line.trim()); - line = bReader.readLine(); - } - if (null == line || PKCS8_ENCRYPTED_FOOTER.equals(line.trim()) == false) { - throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); - } - byte[] keyBytes = Base64.getDecoder().decode(sb.toString()); - - EncryptedPrivateKeyInfo encryptedPrivateKeyInfo = new EncryptedPrivateKeyInfo(keyBytes); - SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(encryptedPrivateKeyInfo.getAlgName()); - SecretKey secretKey = secretKeyFactory.generateSecret(new PBEKeySpec(keyPassword)); - Cipher cipher = Cipher.getInstance(encryptedPrivateKeyInfo.getAlgName()); - cipher.init(Cipher.DECRYPT_MODE, secretKey, encryptedPrivateKeyInfo.getAlgParameters()); - PKCS8EncodedKeySpec keySpec = encryptedPrivateKeyInfo.getKeySpec(cipher); - String keyAlgo = getKeyAlgorithmIdentifier(keySpec.getEncoded()); - KeyFactory keyFactory = KeyFactory.getInstance(keyAlgo); - return keyFactory.generatePrivate(keySpec); - } - - /** - * Decrypts the password protected contents using the algorithm and IV that is specified in the PEM Headers of the file - * - * @param pemHeaders The Proc-Type and DEK-Info PEM headers that have been extracted from the key file - * @param keyContents The key as a base64 encoded String - * @param passwordSupplier A password supplier for the encrypted (password protected) key - * @return the decrypted key bytes - * @throws GeneralSecurityException if the key can't be decrypted - * @throws IOException if the PEM headers are missing or malformed - */ - private static byte[] possiblyDecryptPKCS1Key(Map pemHeaders, String keyContents, Supplier passwordSupplier) - throws GeneralSecurityException, IOException { - byte[] keyBytes = Base64.getDecoder().decode(keyContents); - String procType = pemHeaders.get("Proc-Type"); - if ("4,ENCRYPTED".equals(procType)) { - // We only handle PEM encryption - String encryptionParameters = pemHeaders.get("DEK-Info"); - if (null == encryptionParameters) { - // malformed pem - throw new IOException("Malformed PEM File, DEK-Info header is missing"); - } - char[] password = passwordSupplier.get(); - if (password == null) { - throw new IOException("cannot read encrypted key without a password"); + throw new SslConfigException( + String.format( + Locale.ROOT, + "error parsing private key [%s], invalid encrypted private key class: [%s]", + keyPath.toAbsolutePath(), + object.getClass().getName() + ) + ); } - Cipher cipher = getCipherFromParameters(encryptionParameters, password); - byte[] decryptedKeyBytes = cipher.doFinal(keyBytes); - return decryptedKeyBytes; } - return keyBytes; } /** - * Creates a {@link Cipher} from the contents of the DEK-Info header of a PEM file. 
RFC 1421 indicates that supported algorithms are - * defined in RFC 1423. RFC 1423 only defines DES-CBS and triple DES (EDE) in CBC mode. AES in CBC mode is also widely used though ( 3 - * different variants of 128, 192, 256 bit keys ) + * Supports PEM files that includes parameters. * - * @param dekHeaderValue The value of the DEK-Info PEM header - * @param password The password with which the key is encrypted - * @return a cipher of the appropriate algorithm and parameters to be used for decryption - * @throws GeneralSecurityException if the algorithm is not available in the used security provider, or if the key is inappropriate - * for the cipher - * @throws IOException if the DEK-Info PEM header is invalid - */ - private static Cipher getCipherFromParameters(String dekHeaderValue, char[] password) throws GeneralSecurityException, IOException { - final String padding = "PKCS5Padding"; - final SecretKey encryptionKey; - final String[] valueTokens = dekHeaderValue.split(","); - if (valueTokens.length != 2) { - throw new IOException("Malformed PEM file, DEK-Info PEM header is invalid"); - } - final String algorithm = valueTokens[0]; - final String ivString = valueTokens[1]; - final byte[] iv; - try { - iv = hexStringToByteArray(ivString); - } catch (IllegalArgumentException e) { - throw new IOException("Malformed PEM file, DEK-Info IV is invalid", e); - } - if ("DES-CBC".equals(algorithm)) { - byte[] key = generateOpenSslKey(password, iv, 8); - encryptionKey = new SecretKeySpec(key, "DES"); - } else if ("DES-EDE3-CBC".equals(algorithm)) { - byte[] key = generateOpenSslKey(password, iv, 24); - encryptionKey = new SecretKeySpec(key, "DESede"); - } else if ("AES-128-CBC".equals(algorithm)) { - byte[] key = generateOpenSslKey(password, iv, 16); - encryptionKey = new SecretKeySpec(key, "AES"); - } else if ("AES-192-CBC".equals(algorithm)) { - byte[] key = generateOpenSslKey(password, iv, 24); - encryptionKey = new SecretKeySpec(key, "AES"); - } else if ("AES-256-CBC".equals(algorithm)) { - byte[] key = generateOpenSslKey(password, iv, 32); - encryptionKey = new SecretKeySpec(key, "AES"); - } else { - throw new GeneralSecurityException("Private Key encrypted with unsupported algorithm [" + algorithm + "]"); - } - String transformation = encryptionKey.getAlgorithm() + "/" + "CBC" + "/" + padding; - Cipher cipher = Cipher.getInstance(transformation); - cipher.init(Cipher.DECRYPT_MODE, encryptionKey, new IvParameterSpec(iv)); - return cipher; - } - - /** - * Performs key stretching in the same manner that OpenSSL does. This is basically a KDF - * that uses n rounds of salted MD5 (as many times as needed to get the necessary number of key bytes) - *

- * https://www.openssl.org/docs/man1.1.0/crypto/PEM_write_bio_PrivateKey_traditional.html - */ - private static byte[] generateOpenSslKey(char[] password, byte[] salt, int keyLength) { - byte[] passwordBytes = CharArrays.toUtf8Bytes(password); - MessageDigest md5 = SslUtil.messageDigest("md5"); - byte[] key = new byte[keyLength]; - int copied = 0; - int remaining; - while (copied < keyLength) { - remaining = keyLength - copied; - md5.update(passwordBytes, 0, passwordBytes.length); - md5.update(salt, 0, 8);// AES IV (salt) is longer but we only need 8 bytes - byte[] tempDigest = md5.digest(); - int bytesToCopy = (remaining > 16) ? 16 : remaining; // MD5 digests are 16 bytes - System.arraycopy(tempDigest, 0, key, copied, bytesToCopy); - copied += bytesToCopy; - if (remaining == 0) { - break; - } - md5.update(tempDigest, 0, 16); // use previous round digest as IV - } - Arrays.fill(passwordBytes, (byte) 0); - return key; - } - - /** - * Converts a hexadecimal string to a byte array - */ - private static byte[] hexStringToByteArray(String hexString) { - int len = hexString.length(); - if (len % 2 == 0) { - byte[] data = new byte[len / 2]; - for (int i = 0; i < len; i += 2) { - final int k = Character.digit(hexString.charAt(i), 16); - final int l = Character.digit(hexString.charAt(i + 1), 16); - if (k == -1 || l == -1) { - throw new IllegalStateException("String [" + hexString + "] is not hexadecimal"); + * @return high-level Object from the content + */ + private static Object readObject(Path keyPath, PEMParser pemParser) throws IOException { + while (pemParser.ready()) { + try { + var object = pemParser.readObject(); + if (object == null) { // ignore unknown objects; + continue; } - data[i / 2] = (byte) ((k << 4) + l); - } - return data; - } else { - throw new IllegalStateException( - "Hexadecimal string [" + hexString + "] has odd length and cannot be converted to a byte array" - ); - } - } - - /** - * Parses a DER encoded EC key to an {@link ECPrivateKeySpec} using a minimal {@link DerParser} - * - * @param keyBytes the private key raw bytes - * @return {@link ECPrivateKeySpec} - * @throws IOException if the DER encoded key can't be parsed - */ - private static ECPrivateKeySpec parseEcDer(byte[] keyBytes) throws IOException, GeneralSecurityException { - DerParser parser = new DerParser(keyBytes); - DerParser.Asn1Object sequence = parser.readAsn1Object(); - parser = sequence.getParser(); - parser.readAsn1Object().getInteger(); // version - String keyHex = parser.readAsn1Object().getString(); - BigInteger privateKeyInt = new BigInteger(keyHex, 16); - DerParser.Asn1Object choice = parser.readAsn1Object(); - parser = choice.getParser(); - String namedCurve = getEcCurveNameFromOid(parser.readAsn1Object().getOid()); - KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("EC"); - AlgorithmParameterSpec algorithmParameterSpec = new ECGenParameterSpec(namedCurve); - keyPairGenerator.initialize(algorithmParameterSpec); - ECParameterSpec parameterSpec = ((ECKey) keyPairGenerator.generateKeyPair().getPrivate()).getParams(); - return new ECPrivateKeySpec(privateKeyInt, parameterSpec); - } - - /** - * Parses a DER encoded RSA key to a {@link RSAPrivateCrtKeySpec} using a minimal {@link DerParser} - * - * @param keyBytes the private key raw bytes - * @return {@link RSAPrivateCrtKeySpec} - * @throws IOException if the DER encoded key can't be parsed - */ - private static RSAPrivateCrtKeySpec parseRsaDer(byte[] keyBytes) throws IOException { - DerParser parser = new DerParser(keyBytes); - 
DerParser.Asn1Object sequence = parser.readAsn1Object(); - parser = sequence.getParser(); - parser.readAsn1Object().getInteger(); // (version) We don't need it but must read to get to modulus - BigInteger modulus = parser.readAsn1Object().getInteger(); - BigInteger publicExponent = parser.readAsn1Object().getInteger(); - BigInteger privateExponent = parser.readAsn1Object().getInteger(); - BigInteger prime1 = parser.readAsn1Object().getInteger(); - BigInteger prime2 = parser.readAsn1Object().getInteger(); - BigInteger exponent1 = parser.readAsn1Object().getInteger(); - BigInteger exponent2 = parser.readAsn1Object().getInteger(); - BigInteger coefficient = parser.readAsn1Object().getInteger(); - return new RSAPrivateCrtKeySpec(modulus, publicExponent, privateExponent, prime1, prime2, exponent1, exponent2, coefficient); - } - - /** - * Parses a DER encoded DSA key to a {@link DSAPrivateKeySpec} using a minimal {@link DerParser} - * - * @param keyBytes the private key raw bytes - * @return {@link DSAPrivateKeySpec} - * @throws IOException if the DER encoded key can't be parsed - */ - private static DSAPrivateKeySpec parseDsaDer(byte[] keyBytes) throws IOException { - DerParser parser = new DerParser(keyBytes); - DerParser.Asn1Object sequence = parser.readAsn1Object(); - parser = sequence.getParser(); - parser.readAsn1Object().getInteger(); // (version) We don't need it but must read to get to p - BigInteger p = parser.readAsn1Object().getInteger(); - BigInteger q = parser.readAsn1Object().getInteger(); - BigInteger g = parser.readAsn1Object().getInteger(); - parser.readAsn1Object().getInteger(); // we don't need x - BigInteger x = parser.readAsn1Object().getInteger(); - return new DSAPrivateKeySpec(x, p, q, g); - } - - /** - * Parses a DER encoded private key and reads its algorithm identifier Object OID. - * - * @param keyBytes the private key raw bytes - * @return A string identifier for the key algorithm (RSA, DSA, or EC) - * @throws GeneralSecurityException if the algorithm oid that is parsed from ASN.1 is unknown - * @throws IOException if the DER encoded key can't be parsed - */ - private static String getKeyAlgorithmIdentifier(byte[] keyBytes) throws IOException, GeneralSecurityException { - DerParser parser = new DerParser(keyBytes); - DerParser.Asn1Object sequence = parser.readAsn1Object(); - parser = sequence.getParser(); - parser.readAsn1Object().getInteger(); // version - DerParser.Asn1Object algSequence = parser.readAsn1Object(); - parser = algSequence.getParser(); - String oidString = parser.readAsn1Object().getOid(); - switch (oidString) { - case "1.2.840.10040.4.1": - return "DSA"; - case "1.2.840.113549.1.1.1": - return "RSA"; - case "1.2.840.10045.2.1": - return "EC"; - } - throw new GeneralSecurityException( - "Error parsing key algorithm identifier. 
Algorithm with OID [" + oidString + "] is not żsupported" - ); - } - - static List readCertificates(Collection certPaths) throws CertificateException, IOException { - CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); - List certificates = new ArrayList<>(certPaths.size()); - for (Path path : certPaths) { - try (InputStream input = Files.newInputStream(path)) { - final Collection parsed = certFactory.generateCertificates(input); - if (parsed.isEmpty()) { - throw new SslConfigException("failed to parse any certificates from [" + path.toAbsolutePath() + "]"); + if (object instanceof ASN1ObjectIdentifier) { // ignore -----BEGIN EC PARAMETERS----- + continue; } - certificates.addAll(parsed); + return object; + } catch (IOException e) { // ignore -----BEGIN DSA PARAMETERS----- + // ignore } } - return certificates; - } - - private static String getEcCurveNameFromOid(String oidString) throws GeneralSecurityException { - switch (oidString) { - // see https://tools.ietf.org/html/rfc5480#section-2.1.1.1 - case "1.2.840.10045.3.1": - return "secp192r1"; - case "1.3.132.0.1": - return "sect163k1"; - case "1.3.132.0.15": - return "sect163r2"; - case "1.3.132.0.33": - return "secp224r1"; - case "1.3.132.0.26": - return "sect233k1"; - case "1.3.132.0.27": - return "sect233r1"; - case "1.2.840.10045.3.1.7": - return "secp256r1"; - case "1.3.132.0.16": - return "sect283k1"; - case "1.3.132.0.17": - return "sect283r1"; - case "1.3.132.0.34": - return "secp384r1"; - case "1.3.132.0.36": - return "sect409k1"; - case "1.3.132.0.37": - return "sect409r1"; - case "1.3.132.0.35": - return "secp521r1"; - case "1.3.132.0.38": - return "sect571k1"; - case "1.3.132.0.39": - return "sect571r1"; - } - throw new GeneralSecurityException( - "Error parsing EC named curve identifier. Named curve with OID: " + oidString + " is not supported" + throw new SslConfigException( + "Error parsing Private Key [" + keyPath.toAbsolutePath() + "]. The file is empty, or does not contain expected key format." 
); } diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java index 23acb0ff269e2..546d7f0ebd994 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java @@ -38,7 +38,6 @@ import java.nio.file.Path; import java.security.GeneralSecurityException; -import java.security.NoSuchAlgorithmException; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -66,12 +65,7 @@ public class SslConfiguration { static final Map ORDERED_PROTOCOL_ALGORITHM_MAP; static { LinkedHashMap protocolAlgorithmMap = new LinkedHashMap<>(); - try { - SSLContext.getInstance("TLSv1.3"); - protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); - } catch (NoSuchAlgorithmException e) { - // ignore since we support JVMs (and BC JSSE in FIPS mode) that do not support TLSv1.3 - } + protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); protocolAlgorithmMap.put("TLSv1.2", "TLSv1.2"); protocolAlgorithmMap.put("TLSv1.1", "TLSv1.1"); protocolAlgorithmMap.put("TLSv1", "TLSv1"); diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java index 0b06a0692197e..433bec734e0b8 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java @@ -360,14 +360,11 @@ private List resolveListSetting(String key, Function parser, L private static List loadDefaultCiphers() { final boolean has256BitAES = has256BitAES(); - final boolean tlsV13Supported = DEFAULT_PROTOCOLS.contains("TLSv1.3"); List ciphers = new ArrayList<>(); - if (tlsV13Supported) { // TLSv1.3 cipher has PFS, AEAD, hardware support - if (has256BitAES) { - ciphers.add("TLS_AES_256_GCM_SHA384"); - } - ciphers.add("TLS_AES_128_GCM_SHA256"); + if (has256BitAES) { + ciphers.add("TLS_AES_256_GCM_SHA384"); } + ciphers.add("TLS_AES_128_GCM_SHA256"); // use GCM: PFS, AEAD, hardware support if (has256BitAES) { ciphers.addAll( diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java index 688f03a1e51fa..70cb76ceaec51 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java @@ -41,11 +41,11 @@ import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.StandardCopyOption; -import java.security.GeneralSecurityException; import java.security.PrivateKey; import java.security.cert.CertificateParsingException; import java.security.cert.X509Certificate; import java.util.Arrays; +import java.util.function.Supplier; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -58,6 +58,7 @@ public class PemKeyConfigTests extends OpenSearchTestCase { private static final int IP_NAME = 7; private static final int DNS_NAME = 2; + private static final Supplier STRONG_PRIVATE_SECRET = "6!6428DQXwPpi7@$ggeg/="::toCharArray; public void testBuildKeyConfigFromPkcs1PemFilesWithoutPassword() throws Exception { final Path cert = getDataPath("/certs/cert1/cert1.crt"); @@ -68,8 +69,8 @@ public void 
testBuildKeyConfigFromPkcs1PemFilesWithoutPassword() throws Exceptio } public void testBuildKeyConfigFromPkcs1PemFilesWithPassword() throws Exception { - final Path cert = getDataPath("/certs/cert2/cert2.crt"); - final Path key = getDataPath("/certs/cert2/cert2.key"); + final Path cert = getDataPath("/certs/cert2/cert2-pkcs1.crt"); + final Path key = getDataPath("/certs/cert2/cert2-pkcs1.key"); final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, "c2-pass".toCharArray()); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert, key)); assertCertificateAndKey(keyConfig, "CN=cert2"); @@ -77,7 +78,7 @@ public void testBuildKeyConfigFromPkcs1PemFilesWithPassword() throws Exception { public void testBuildKeyConfigFromPkcs8PemFilesWithoutPassword() throws Exception { final Path cert = getDataPath("/certs/cert1/cert1.crt"); - final Path key = getDataPath("/certs/cert1/cert1-pkcs8.key"); + final Path key = getDataPath("/certs/cert1/cert1.key"); final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, new char[0]); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert, key)); assertCertificateAndKey(keyConfig, "CN=cert1"); @@ -86,8 +87,8 @@ public void testBuildKeyConfigFromPkcs8PemFilesWithoutPassword() throws Exceptio public void testBuildKeyConfigFromPkcs8PemFilesWithPassword() throws Exception { assumeFalse("Can't run in a FIPS JVM, PBE KeySpec is not available", inFipsJvm()); final Path cert = getDataPath("/certs/cert2/cert2.crt"); - final Path key = getDataPath("/certs/cert2/cert2-pkcs8.key"); - final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, "c2-pass".toCharArray()); + final Path key = getDataPath("/certs/cert2/cert2.key"); + final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, STRONG_PRIVATE_SECRET.get()); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert, key)); assertCertificateAndKey(keyConfig, "CN=cert2"); } @@ -166,7 +167,7 @@ private void assertPasswordIsIncorrect(PemKeyConfig keyConfig, Path key) { final SslConfigException exception = expectThrows(SslConfigException.class, keyConfig::createKeyManager); assertThat(exception.getMessage(), containsString("private key file")); assertThat(exception.getMessage(), containsString(key.toAbsolutePath().toString())); - assertThat(exception.getCause(), instanceOf(GeneralSecurityException.class)); + assertThat(exception, instanceOf(SslConfigException.class)); } private void assertFileNotFound(PemKeyConfig keyConfig, String type, Path file) { diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java index e664e379d1e97..4175b0ee424b7 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java @@ -81,7 +81,7 @@ public void testEmptyFileFails() throws Exception { final Path ca = createTempFile("ca", ".crt"); final PemTrustConfig trustConfig = new PemTrustConfig(Collections.singletonList(ca)); assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ca)); - assertEmptyFile(trustConfig, ca); + assertFailedToParse(trustConfig, ca); } public void testMissingFileFailsWithMeaningfulMessage() throws Exception { @@ -135,21 +135,16 @@ private void assertCertificateChain(PemTrustConfig trustConfig, String... 
caName assertThat(issuerNames, Matchers.containsInAnyOrder(caNames)); } - private void assertEmptyFile(PemTrustConfig trustConfig, Path file) { + private void assertFailedToParse(PemTrustConfig trustConfig, Path file) { final SslConfigException exception = expectThrows(SslConfigException.class, trustConfig::createTrustManager); logger.info("failure", exception); assertThat(exception.getMessage(), Matchers.containsString(file.toAbsolutePath().toString())); - assertThat(exception.getMessage(), Matchers.containsString("failed to parse any certificates")); + assertThat(exception.getMessage(), Matchers.containsString("Failed to parse any certificate from")); } private void assertInvalidFileFormat(PemTrustConfig trustConfig, Path file) { final SslConfigException exception = expectThrows(SslConfigException.class, trustConfig::createTrustManager); assertThat(exception.getMessage(), Matchers.containsString(file.toAbsolutePath().toString())); - // When running on BC-FIPS, an invalid file format *might* just fail to parse, without any errors (just like an empty file) - // or it might behave per the SUN provider, and throw a GSE (depending on exactly what was invalid) - if (inFipsJvm() && exception.getMessage().contains("failed to parse any certificates")) { - return; - } assertThat(exception.getMessage(), Matchers.containsString("cannot create trust")); assertThat(exception.getMessage(), Matchers.containsString("PEM")); assertThat(exception.getCause(), Matchers.instanceOf(GeneralSecurityException.class)); diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemUtilsTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemUtilsTests.java index c7ca19bb679d3..f1255ab64f672 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemUtilsTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemUtilsTests.java @@ -32,8 +32,11 @@ package org.opensearch.common.ssl; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; +import org.bouncycastle.asn1.pkcs.PrivateKeyInfo; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; @@ -44,6 +47,7 @@ import java.security.interfaces.ECPrivateKey; import java.security.spec.ECGenParameterSpec; import java.security.spec.ECParameterSpec; +import java.util.Locale; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; @@ -55,6 +59,7 @@ public class PemUtilsTests extends OpenSearchTestCase { private static final Supplier EMPTY_PASSWORD = () -> new char[0]; private static final Supplier TESTNODE_PASSWORD = "testnode"::toCharArray; + private static final Supplier STRONG_PRIVATE_SECRET = "6!6428DQXwPpi7@$ggeg/="::toCharArray; public void testReadPKCS8RsaKey() throws Exception { Key key = getKeyFromKeystore("RSA"); @@ -82,6 +87,16 @@ public void testReadPKCS8DsaKey() throws Exception { assertThat(privateKey, equalTo(key)); } + public void testReadEncryptedPKCS8DsaKey() throws Exception { + Key key = getKeyFromKeystore("DSA"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/dsa_key_pkcs8_encrypted.pem"), TESTNODE_PASSWORD); + + assertThat(privateKey, notNullValue()); + assertThat(privateKey, equalTo(key)); + } + public void testReadEcKeyCurves() throws Exception { String curve = randomFrom("secp256r1", "secp384r1", "secp521r1"); PrivateKey privateKey = 
PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/private_" + curve + ".pem"), ""::toCharArray); @@ -102,6 +117,16 @@ public void testReadPKCS8EcKey() throws Exception { assertThat(privateKey, equalTo(key)); } + public void testReadEncryptedPKCS8EcKey() throws Exception { + var key = getKeyFromKeystore("EC"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + var privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_pkcs8_encrypted.pem"), TESTNODE_PASSWORD); + + assertThat(privateKey, notNullValue()); + assertThat(privateKey, equalTo(key)); + } + public void testReadEncryptedPKCS8Key() throws Exception { assumeFalse("Can't run in a FIPS JVM, PBE KeySpec is not available", inFipsJvm()); Key key = getKeyFromKeystore("RSA"); @@ -176,13 +201,12 @@ public void testReadEncryptedOpenSslDsaKey() throws Exception { } public void testReadOpenSslEcKey() throws Exception { - Key key = getKeyFromKeystore("EC"); + var key = getKeyFromKeystore("EC"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_plain.pem"), EMPTY_PASSWORD); + var privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_plain.pem"), EMPTY_PASSWORD); - assertThat(privateKey, notNullValue()); - assertThat(privateKey, equalTo(key)); + assertTrue(isCryptographicallyEqual((ECPrivateKey) key, (ECPrivateKey) privateKey)); } public void testReadOpenSslEcKeyWithParams() throws Exception { @@ -194,16 +218,41 @@ public void testReadOpenSslEcKeyWithParams() throws Exception { EMPTY_PASSWORD ); + assertTrue(isCryptographicallyEqual((ECPrivateKey) key, (ECPrivateKey) privateKey)); + } + + public void testReadEncryptedOpenSslEcKey() throws Exception { + var key = getKeyFromKeystore("EC"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + var privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_encrypted.pem"), TESTNODE_PASSWORD); + + assertTrue(isCryptographicallyEqual((ECPrivateKey) key, (ECPrivateKey) privateKey)); + } + + public void testReadEncryptedPKCS8KeyWithPBKDF2() throws Exception { + Key key = getKeyFromKeystore("PKCS8_PBKDF2"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/key_PKCS8_enc_pbkdf2.pem"), STRONG_PRIVATE_SECRET); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); } - public void testReadEncryptedOpenSslEcKey() throws Exception { - Key key = getKeyFromKeystore("EC"); + public void testReadEncryptedDsaKeyWithPBKDF2() throws Exception { + Key key = getKeyFromKeystore("DSA_PBKDF2"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_encrypted.pem"), TESTNODE_PASSWORD); + PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/key_DSA_enc_pbkdf2.pem"), STRONG_PRIVATE_SECRET); + assertThat(privateKey, notNullValue()); + assertThat(privateKey, equalTo(key)); + } + public void testReadEncryptedEcKeyWithPBKDF2() throws Exception { + Key key = getKeyFromKeystore("EC_PBKDF2"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/key_EC_enc_pbkdf2.pem"), EMPTY_PASSWORD); 
assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); } @@ -211,24 +260,24 @@ public void testReadEncryptedOpenSslEcKey() throws Exception { public void testReadUnsupportedKey() { final Path path = getDataPath("/certs/pem-utils/key_unsupported.pem"); SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.readPrivateKey(path, TESTNODE_PASSWORD)); - assertThat(e.getMessage(), containsString("file does not contain a supported key format")); + assertThat(e.getMessage(), containsString("Error parsing Private Key")); assertThat(e.getMessage(), containsString(path.toAbsolutePath().toString())); + assertThat(e.getMessage(), containsString("file is empty")); } public void testReadPemCertificateAsKey() { final Path path = getDataPath("/certs/pem-utils/testnode.crt"); SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.readPrivateKey(path, TESTNODE_PASSWORD)); - assertThat(e.getMessage(), containsString("file does not contain a supported key format")); + assertThat(e.getMessage(), containsString("invalid encrypted private key class")); assertThat(e.getMessage(), containsString(path.toAbsolutePath().toString())); } public void testReadCorruptedKey() { final Path path = getDataPath("/certs/pem-utils/corrupted_key_pkcs8_plain.pem"); SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.readPrivateKey(path, TESTNODE_PASSWORD)); - assertThat(e.getMessage(), containsString("private key")); - assertThat(e.getMessage(), containsString("cannot be parsed")); + assertThat(e.getMessage(), containsString("Error parsing Private Key")); assertThat(e.getMessage(), containsString(path.toAbsolutePath().toString())); - assertThat(e.getCause().getMessage(), containsString("PEM footer is invalid or missing")); + assertThat(e.getMessage(), containsString("file is empty")); } public void testReadEmptyFile() { @@ -239,11 +288,27 @@ public void testReadEmptyFile() { } private Key getKeyFromKeystore(String algo) throws Exception { - Path keystorePath = getDataPath("/certs/pem-utils/testnode.jks"); + var keystorePath = getDataPath("/certs/pem-utils/testnode.jks"); + var alias = "testnode_" + algo.toLowerCase(Locale.ROOT); + var password = "testnode".toCharArray(); try (InputStream in = Files.newInputStream(keystorePath)) { KeyStore keyStore = KeyStore.getInstance("jks"); - keyStore.load(in, "testnode".toCharArray()); - return keyStore.getKey("testnode_" + algo, "testnode".toCharArray()); + keyStore.load(in, password); + return keyStore.getKey(alias, password); } } + + private boolean isCryptographicallyEqual(ECPrivateKey key1, ECPrivateKey key2) throws IOException { + var pki1 = PrivateKeyInfo.getInstance(key1.getEncoded()); + var pki2 = PrivateKeyInfo.getInstance(key2.getEncoded()); + + var privateKey1 = org.bouncycastle.asn1.sec.ECPrivateKey.getInstance(pki1.parsePrivateKey()).getKey(); + var privateKey2 = org.bouncycastle.asn1.sec.ECPrivateKey.getInstance(pki2.parsePrivateKey()).getKey(); + + var oid1 = ASN1ObjectIdentifier.getInstance(pki1.getPrivateKeyAlgorithm().getParameters()); + var oid2 = ASN1ObjectIdentifier.getInstance(pki2.getPrivateKeyAlgorithm().getParameters()); + + return privateKey1.equals(privateKey2) && oid1.equals(oid2); + } + } diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java index 5af7ddc73e680..366e936ca4852 100644 --- 
a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java @@ -53,6 +53,7 @@ public class SslConfigurationLoaderTests extends OpenSearchTestCase { + private final String STRONG_PRIVATE_SECRET = "6!6428DQXwPpi7@$ggeg/="; private final Path certRoot = getDataPath("/certs/ca1/ca.crt").getParent().getParent(); private Settings settings; @@ -166,9 +167,9 @@ public void testLoadKeysFromPemFiles() { .put("test.ssl.key", certName + "/" + certName + ".key"); if (usePassword) { if (useLegacyPassword) { - builder.put("test.ssl.key_passphrase", "c2-pass"); + builder.put("test.ssl.key_passphrase", STRONG_PRIVATE_SECRET); } else { - secureSettings.setString("test.ssl.secure_key_passphrase", "c2-pass"); + secureSettings.setString("test.ssl.secure_key_passphrase", STRONG_PRIVATE_SECRET); } } settings = builder.build(); diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java index c966b4259219f..e19fa91f7773e 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java @@ -70,6 +70,12 @@ public class SslDiagnosticsTests extends OpenSearchTestCase { private static final byte[] MOCK_ENCODING_4 = { 0x64, 0x65, 0x66, 0x67, 0x68, 0x69 }; private static final String MOCK_FINGERPRINT_4 = "5d96965bfae50bf2be0d6259eb87a6cc9f5d0b26"; + public void testTrustEmptyStore() { + var fileName = "cert-all/empty.jks"; + var exception = assertThrows(CertificateException.class, () -> loadCertificate(fileName)); + assertThat(exception.getMessage(), Matchers.equalTo("No certificate data found")); + } + public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsTrusted() throws Exception { X509Certificate[] chain = loadCertChain("cert1/cert1.crt", "ca1/ca.crt"); final SSLSession session = session("192.168.1.1"); @@ -85,7 +91,7 @@ public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsTrusted() message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by" @@ -110,7 +116,7 @@ public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsntTrusted message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc])" @@ -134,7 +140,7 @@ public void testDiagnosticMessageWhenServerFullCertChainIsntTrustedButMimicIssue message, Matchers.equalTo( "failed to establish trust with server at 
[192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc])" @@ -160,7 +166,7 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyAndTheCertA message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" @@ -185,7 +191,7 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyButTheCertA message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" @@ -209,7 +215,7 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyWithMimicIs message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.1];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" @@ -235,7 +241,7 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateWithMultipleMim message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.9];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" @@ -538,7 +544,7 @@ public void testDiagnosticMessageForClientCertificate() throws Exception { Matchers.equalTo( "failed to establish trust with client at [192.168.1.7];" + " the client provided a certificate with subject name [CN=cert1]" - + " and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " and fingerprint 
[7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate is issued by [CN=Test CA 1]" + " but the client did not provide a copy of the issuing certificate in the certificate chain;" + " the issuing certificate with fingerprint [2b7b0416391bdf86502505c23149022d2213dadc]" @@ -571,7 +577,7 @@ public void testDiagnosticMessageWhenCaHasNewIssuingCertificate() throws Excepti message, Matchers.equalTo( "failed to establish trust with server at [192.168.1.4];" - + " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1] and fingerprint [7e0919348e566651a136f2a1d5974585d5b3712e];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by (subject [CN=Test CA 1]" diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java index 7806671d02793..1745c547d04ee 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java @@ -48,6 +48,7 @@ import java.security.cert.X509Certificate; import java.util.Arrays; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -95,7 +96,7 @@ public void testLoadMultipleKeyJksWithSeparateKeyPassword() throws Exception { assertKeysLoaded(keyConfig, "cert1", "cert2"); } - public void testKeyManagerFailsWithIncorrectStorePassword() throws Exception { + public void testKeyManagerFailsWithIncorrectJksStorePassword() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path jks = getDataPath("/certs/cert-all/certs.jks"); final StoreKeyConfig keyConfig = new StoreKeyConfig( @@ -109,7 +110,7 @@ public void testKeyManagerFailsWithIncorrectStorePassword() throws Exception { assertPasswordIsIncorrect(keyConfig, jks); } - public void testKeyManagerFailsWithIncorrectKeyPassword() throws Exception { + public void testKeyManagerFailsWithIncorrectJksKeyPassword() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path jks = getDataPath("/certs/cert-all/certs.jks"); final StoreKeyConfig keyConfig = new StoreKeyConfig(jks, JKS_PASS, "jks", JKS_PASS, KeyManagerFactory.getDefaultAlgorithm()); @@ -125,21 +126,20 @@ public void testKeyManagerFailsWithMissingKeystoreFile() throws Exception { assertFileNotFound(keyConfig, path); } - public void testMissingKeyEntriesFailsWithMeaningfulMessage() throws Exception { + public void testMissingKeyEntriesFailsForJksWithMeaningfulMessage() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); - final Path ks; - final char[] password; - final String type; - if (randomBoolean()) { - type = "PKCS12"; - ks = getDataPath("/certs/ca-all/ca.p12"); - password = P12_PASS; - } else { - type = "jks"; - ks = getDataPath("/certs/ca-all/ca.jks"); - password = JKS_PASS; - } - final StoreKeyConfig keyConfig = new StoreKeyConfig(ks, password, type, password, KeyManagerFactory.getDefaultAlgorithm()); + final Path ks = getDataPath("/certs/ca-all/ca.jks"); + final char[] password = JKS_PASS; + final StoreKeyConfig 
keyConfig = new StoreKeyConfig(ks, password, "jks", password, KeyManagerFactory.getDefaultAlgorithm()); + assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); + assertNoPrivateKeyEntries(keyConfig, ks); + } + + public void testMissingKeyEntriesFailsForP12WithMeaningfulMessage() throws Exception { + assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); + final Path ks = getDataPath("/certs/ca-all/ca.p12"); + final char[] password = P12_PASS; + final StoreKeyConfig keyConfig = new StoreKeyConfig(ks, password, "PKCS12", password, KeyManagerFactory.getDefaultAlgorithm()); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); assertNoPrivateKeyEntries(keyConfig, ks); } @@ -211,7 +211,10 @@ private void assertPasswordIsIncorrect(StoreKeyConfig keyConfig, Path key) { assertThat(exception.getMessage(), containsString("password")); } else { assertThat(exception.getCause(), instanceOf(IOException.class)); - assertThat(exception.getCause().getMessage(), containsString("password")); + assertThat( + exception.getCause().getMessage(), + anyOf(containsString("Keystore was tampered with, or password was incorrect"), containsString("BCFKS KeyStore corrupted")) + ); } } diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java index 5609f0fa2c877..8058ffe95dc93 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java @@ -58,7 +58,7 @@ public class StoreTrustConfigTests extends OpenSearchTestCase { private static final char[] JKS_PASS = "jks-pass".toCharArray(); private static final String DEFAULT_ALGORITHM = TrustManagerFactory.getDefaultAlgorithm(); - public void testBuildTrustConfigFromPKCS12() throws Exception { + public void testBuildTrustConfigFromP12() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path ks = getDataPath("/certs/ca1/ca.p12"); final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, P12_PASS, "PKCS12", DEFAULT_ALGORITHM); @@ -66,7 +66,7 @@ public void testBuildTrustConfigFromPKCS12() throws Exception { assertCertificateChain(trustConfig, "CN=Test CA 1"); } - public void testBuildTrustConfigFromJKS() throws Exception { + public void testBuildTrustConfigFromJks() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path ks = getDataPath("/certs/ca-all/ca.jks"); final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, JKS_PASS, "jks", DEFAULT_ALGORITHM); @@ -91,28 +91,25 @@ public void testMissingKeyStoreFailsWithMeaningfulMessage() throws Exception { assertFileNotFound(trustConfig, ks); } - public void testIncorrectPasswordFailsWithMeaningfulMessage() throws Exception { + public void testIncorrectPasswordFailsForP12WithMeaningfulMessage() throws Exception { final Path ks = getDataPath("/certs/ca1/ca.p12"); final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, new char[0], "PKCS12", DEFAULT_ALGORITHM); assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); assertPasswordIsIncorrect(trustConfig, ks); } - public void testMissingTrustEntriesFailsWithMeaningfulMessage() throws Exception { + public void testMissingTrustEntriesFailsForJksKeystoreWithMeaningfulMessage() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", 
inFipsJvm()); - final Path ks; - final char[] password; - final String type; - if (randomBoolean()) { - type = "PKCS12"; - ks = getDataPath("/certs/cert-all/certs.p12"); - password = P12_PASS; - } else { - type = "jks"; - ks = getDataPath("/certs/cert-all/certs.jks"); - password = JKS_PASS; - } - final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, password, type, DEFAULT_ALGORITHM); + final Path ks = getDataPath("/certs/cert-all/certs.jks"); + final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, JKS_PASS, "jks", DEFAULT_ALGORITHM); + assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); + assertNoCertificateEntries(trustConfig, ks); + } + + public void testMissingTrustEntriesFailsForP12KeystoreWithMeaningfulMessage() throws Exception { + assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); + final Path ks = getDataPath("/certs/cert-all/certs.p12"); + final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, P12_PASS, "PKCS12", DEFAULT_ALGORITHM); assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); assertNoCertificateEntries(trustConfig, ks); } diff --git a/libs/ssl-config/src/test/resources/certs/README.md b/libs/ssl-config/src/test/resources/certs/README.md new file mode 100644 index 0000000000000..79790a4918f3e --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/README.md @@ -0,0 +1,155 @@ +# Create first CA PEM ("ca1") + +```bash +opensearch-certutil ca --pem --out ca1.zip --days 9999 --ca-dn "CN=Test CA 1" +unzip ca1.zip +mv ca ca1 +``` + +# Create second CA PEM ("ca2") + +```bash +opensearch-certutil ca --pem --out ca2.zip --days 9999 --ca-dn "CN=Test CA 2" +unzip ca2.zip +mv ca ca2 +``` + +# Create third CA PEM ("ca3") + +```bash +opensearch-certutil ca --pem --out ca3.zip --days 9999 --ca-dn "CN=Test CA 3" +unzip ca3.zip +mv ca ca3 +``` + +# Create "cert1-pkcs1" PEM + +```bash +opensearch-certutil cert --pem --out cert1-pkcs1.zip --name cert1 --ip 127.0.0.1 --dns localhost --days 9999 --ca-key ca1/ca.key --ca-cert ca1/ca.crt +unzip cert1-pkcs1.zip +``` + +# Create "cert2-pkcs1" PEM (same as cert1, but with a password) + +```bash +opensearch-certutil cert --pem --out cert2-pkcs1.zip --name cert2 --ip 127.0.0.1 --dns localhost --days 9999 --ca-key ca1/ca.key --ca-cert ca1/ca.crt --pass "c2-pass" +unzip cert2-pkcs1.zip +``` + +# Create "cert1" PEM + +```bash +openssl genpkey -algorithm RSA -out cert1/cert1.key +openssl req -new \ + -key cert1/cert1.key \ + -subj "/CN=cert1" \ + -out cert1/cert1.csr +openssl x509 -req \ + -in cert1/cert1.csr \ + -CA ca1/ca.crt \ + -CAkey ca1/ca.key \ + -CAcreateserial \ + -out cert1/cert1.crt \ + -days 3650 \ + -sha256 \ + -extfile <(printf "subjectAltName=DNS:localhost,IP:127.0.0.1") +rm cert1/cert1.csr +``` + +# Create "cert2" PEM (same as cert1, but with a password) + +```bash +openssl genpkey -algorithm RSA -out cert2/cert2.key -aes256 -pass pass:"$KEY_PW" +openssl req -new \ +-key cert2/cert2.key \ +-subj "/CN=cert2" \ +-out cert2/cert2.csr \ +-passin pass:"$KEY_PW" +openssl x509 -req \ +-in cert2/cert2.csr \ +-CA ca1/ca.crt \ +-CAkey ca1/ca.key \ +-CAcreateserial \ +-out cert2/cert2.crt \ +-days 3650 \ +-sha256 \ +-extfile <(printf "subjectAltName=DNS:localhost,IP:127.0.0.1") \ +-passin pass:"$KEY_PW" +rm cert2/cert2.csr +``` + +# Convert CAs to PKCS#12 + +```bash +for n in 1 2 3 +do + keytool -importcert -file ca${n}/ca.crt -alias ca -keystore ca${n}/ca.p12 -storetype PKCS12 -storepass p12-pass -v + keytool -importcert -file ca${n}/ca.crt -alias ca${n}
-keystore ca-all/ca.p12 -storetype PKCS12 -storepass p12-pass -v +done +``` + +# Convert CAs to JKS + +```bash +for n in 1 2 3 +do + keytool -importcert -file ca${n}/ca.crt -alias ca${n} -keystore ca-all/ca.jks -storetype jks -storepass jks-pass -v +done +``` + +# Convert Certs to PKCS#12 + +```bash +for Cert in cert1 cert2 +do + openssl pkcs12 -export -out $Cert/$Cert.p12 -inkey $Cert/$Cert.key -in $Cert/$Cert.crt -name $Cert -passout pass:p12-pass +done +``` + +# Import Certs into single PKCS#12 keystore + +```bash +for Cert in cert1 cert2 +do + keytool -importkeystore -noprompt \ + -srckeystore $Cert/$Cert.p12 -srcstoretype PKCS12 -srcstorepass p12-pass \ + -destkeystore cert-all/certs.p12 -deststoretype PKCS12 -deststorepass p12-pass +done +``` + +# Import Certs into single JKS keystore with separate key-password + +```bash +for Cert in cert1 cert2 +do + keytool -importkeystore -noprompt \ + -srckeystore $Cert/$Cert.p12 -srcstoretype PKCS12 -srcstorepass p12-pass \ + -destkeystore cert-all/certs.jks -deststoretype jks -deststorepass jks-pass + keytool -keypasswd -keystore cert-all/certs.jks -alias $Cert -keypass p12-pass -new key-pass -storepass jks-pass +done +``` + +# Create a mimic of the first CA ("ca1b") for testing certificates with the same name but different keys + +```bash +opensearch-certutil ca --pem --out ${PWD}/ca1-b.zip --days 9999 --ca-dn "CN=Test CA 1" +unzip ca1-b.zip +mv ca ca1-b +``` + +# Create empty KeyStore + +```bash +keytool -genkeypair \ + -alias temp \ + -storetype JKS \ + -keyalg rsa \ + -storepass storePassword \ + -keypass secretPassword \ + -keystore cert-all/empty.jks \ + -dname "CN=foo,DC=example,DC=com" +keytool -delete \ + -alias temp \ + -storepass storePassword \ + -keystore cert-all/empty.jks +``` diff --git a/libs/ssl-config/src/test/resources/certs/README.txt b/libs/ssl-config/src/test/resources/certs/README.txt deleted file mode 100644 index 09910e99a132e..0000000000000 --- a/libs/ssl-config/src/test/resources/certs/README.txt +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash -# -# This is README describes how the certificates in this directory were created. -# This file can also be executed as a script -# - -# 1. Create first CA PEM ("ca1") - -opensearch-certutil ca --pem --out ca1.zip --days 9999 --ca-dn "CN=Test CA 1" -unzip ca1.zip -mv ca ca1 - -# 2. Create first CA PEM ("ca2") - -opensearch-certutil ca --pem --out ca2.zip --days 9999 --ca-dn "CN=Test CA 2" -unzip ca2.zip -mv ca ca2 - -# 3. Create first CA PEM ("ca3") - -opensearch-certutil ca --pem --out ca3.zip --days 9999 --ca-dn "CN=Test CA 3" -unzip ca3.zip -mv ca ca3 - -# 4. Create "cert1" PEM - -opensearch-certutil cert --pem --out cert1.zip --name cert1 --ip 127.0.0.1 --dns localhost --days 9999 --ca-key ca1/ca.key --ca-cert ca1/ca.crt -unzip cert1.zip - -# 5. Create "cert2" PEM (same as cert1, but with a password) - -opensearch-certutil cert --pem --out cert2.zip --name cert2 --ip 127.0.0.1 --dns localhost --days 9999 --ca-key ca1/ca.key --ca-cert ca1/ca.crt --pass "c2-pass" -unzip cert2.zip - -# 6. Convert CAs to PKCS#12 - -for n in 1 2 3 -do - keytool -importcert -file ca${n}/ca.crt -alias ca -keystore ca${n}/ca.p12 -storetype PKCS12 -storepass p12-pass -v - keytool -importcert -file ca${n}/ca.crt -alias ca${n} -keystore ca-all/ca.p12 -storetype PKCS12 -storepass p12-pass -v -done - -# 7. Convert CAs to JKS - -for n in 1 2 3 -do - keytool -importcert -file ca${n}/ca.crt -alias ca${n} -keystore ca-all/ca.jks -storetype jks -storepass jks-pass -v -done - -# 8. 
Convert Certs to PKCS#12 - -for Cert in cert1 cert2 -do - openssl pkcs12 -export -out $Cert/$Cert.p12 -inkey $Cert/$Cert.key -in $Cert/$Cert.crt -name $Cert -passout pass:p12-pass -done - -# 9. Import Certs into single PKCS#12 keystore - -for Cert in cert1 cert2 -do - keytool -importkeystore -noprompt \ - -srckeystore $Cert/$Cert.p12 -srcstoretype PKCS12 -srcstorepass p12-pass \ - -destkeystore cert-all/certs.p12 -deststoretype PKCS12 -deststorepass p12-pass -done - -# 10. Import Certs into single JKS keystore with separate key-password - -for Cert in cert1 cert2 -do - keytool -importkeystore -noprompt \ - -srckeystore $Cert/$Cert.p12 -srcstoretype PKCS12 -srcstorepass p12-pass \ - -destkeystore cert-all/certs.jks -deststoretype jks -deststorepass jks-pass - keytool -keypasswd -keystore cert-all/certs.jks -alias $Cert -keypass p12-pass -new key-pass -storepass jks-pass -done - -# 11. Create a mimic of the first CA ("ca1b") for testing certificates with the same name but different keys - -opensearch-certutil ca --pem --out ${PWD}/ca1-b.zip --days 9999 --ca-dn "CN=Test CA 1" -unzip ca1-b.zip -mv ca ca1-b - -# 12. Convert certifcate keys to pkcs8 - -openssl pkcs8 -topk8 -inform PEM -in cert1/cert1.key -outform PEM -out cert1/cert1-pkcs8.key -nocrypt -openssl pkcs8 -topk8 -inform PEM -in cert2/cert2.key -outform PEM -out cert2/cert2-pkcs8.key -passin pass:"c2-pass" -passout pass:"c2-pass" diff --git a/libs/ssl-config/src/test/resources/certs/cert-all/certs.p12 b/libs/ssl-config/src/test/resources/certs/cert-all/certs.p12 index b971a1e39c83baeea8e4fab3cf6b76804047ee48..73912976ca7cc61d310d02f1f8805d35ea75f612 100644 GIT binary patch literal 4895 zcmbW3WmFW5mxp2Kl5WYNq#R%- z&+fkevtM@i?5BI~^E>x`y5~6;1S6*hpkshwWV6_K+;A264G}sHdI5}V5EDk$_ZN48 zU<9)NEg}%agc0!n#oQnmRth%3e@+qNqXP49>IJx9~XGZhuS|<`ngy?V1`SYn`F_>sZLIEB z62SqlOjq!GYG#jdSaXZD0FM?eQz>DgIf4xH54b{pNX2u7dudW(Y=WG9^hz3%QM9#GC}f|kavMIhHq5+cWb19HowMddyAl$;BUtf&f>N@lKCZn>2HLwQ$V6npQ$0}(*;JUg~=bQA4PvNdLD zfm|5qwRdOntvv~sqYdK9_Gd8OA^_~`SicplMfjOl&2q!Fdg{&msm@mX`V^Y3Kof-wJa;f^FM9wuYJ1Ms1o?K0=l0 z#unEY9glTqU5oYS&exA(5`@jLc_wLK9`o z)`R*oU+`;or1rR(q;1KUUYIDJ3oeoJ{J2ugg4a5tk%hK0lh8h;C1bSMH}zu5Pfe<` zk%hhJYrS&`tfR`__I)RH*TiRH$aDH(hwDNSJPt%WpKkO{FNa1Eh9kx8*Ki;?M0SjH z*dkPZ$|1wF%Fdb?mvZgfAD2Wf2RLM6jT}I&Dx1c*x}>xfr^81a{Ba0KFRa?$?UMa4 z;*V)c_PgHQJW?`6Rm~`e19uZ{Klba8=Mye5*H~uLV3D7}SBRBMRkcD(G2$&-(!z~I zieKxF$ReXsB-ifMpV#<&L%LRZq9eO9QiZ$Y5K&qxqrC@HrXA-Lm?++fRJa(eXVdxz}m^# z^D6Bs`U7S0BK%&a*`kU&bHX6s>E0Hfj8Pr0E;WDHyKm(CI^|e9YYaZbybdktS=Pl; zg-f{h;tWbQ{Lh+6%Ns=AY%T>Y=tI&|TA3X22Uq?{Gg1wXgp5KZoXbpG=gE69kWMvB zx2f>^c~gmySb1bGN~}Z>hl3A}9V=Yh>efbWG-wnFbny(#he|GA)3je~uZ07fx;KfY zJR!{^W?PQiwugl#Ku;U*VfZnZ_8dxvVZ2~A`he+HNyrj|Azg}6Ft+cdv~IClsIKAuCSps>S5K8F zKTcAXOEc=|GRhJr^9WNYUE{C#r7xUO(LS8Yugz1$j9{9PG-*-uKH;Ce0PWDcYc^Z9 zOre{9?7t=|_T|7?=rF>u`*s;h39Hp(yNlsdwkIv;&&7^h9!`@h{8G=$$n;}58KG7=3Lf7xYA*5kqtqo3Gc z$xm`^;yh;MNzb@wJ5~R(*3wZ%8i2>^8uA5Q*WEH6Z_@l0+(__@?lW)nyYFWn%scA3^n3 zt4)R#Dunn)f=?oK>djcil004nf_H8|={G=&VfhFlzYQzHGmULZ`W!}{*^7Ap!6$lj zNYA16M^OqTOJpH~8}8w$qrD?VP!jb%<!ucxtxTrfs zLF{pJxk9-T$d_4KaJXt5+7hIG|V87dj#m>I0`Jc}}RSkJ4wD0W{Z33rNhp7s&o{Q}?qaggwz>0Y=UqE7yi#LI4UX?J`_x@<8+fOkhUSt<&|M>hfB^`>#efJ^vR^x>J zWyJ34nX@{ltyzI|`a>+FJ@^Y*>b>2xY5DZqc5G*Lt@fACM;xC_#>Ob0J}e;TwpnTm=&#A_Umbx~v91yc)X}9z=W&A7WQyzN1KJvY z^S{?9+f=^EGwM)zJ41RwOz~iR)pbBH2J)HcGi0iw0EgY}zuk4Sadf8t{Pyw7)LuZ} z4vwa!<4}jhFV&ZUaq75yf=fgm653_`QXRz(a=8h)=D%ksZh&*mYanN3FiY7is30G9H>&%^kiOmzV|E( z^g~(9X{Ctw%;|zV#=u&Oc ze~g%7<@QmBdN{L|KctD@QAj~tI{!~vHJH1{oNWo^#4H;?gl)|+QI`o5%I!?^C?K&S 
zhMzhExu_Qob*+&|+xbjNK9RH4bvYwuhr}_c2G-l24=g%e8*{AGr9&Iz0Z^leJ+N^P1)e}f8HEn6-^q8rA!vmkg716noa zdWO__j-hh}gSO+ux8<%oO!+pg%G4U#X?vUxChqg~$`I|(ZntZCx`m-^M}eBM>A7e@ z;sKbSlKf8?4GMJ~x(BEC5Odw5Y6|pT=!Mg~?iZVblNDbDg8#UD%<_ zTAm8d^qA-Hou*95H6M8KapP4)Px4hO5&m4)-ZW78C1x~Dg>p?*NafMaBX z1!RlT7v0tF^RDDFo5pPcea3L!vj zs@y+HJ+V4gQ-DF~8cC*?7xqya$uBx_D>^5|gFiFg3V*(UZrUN1-D1dQJV=K=&-#q# zrb8@$hDF3sP?vWS$=ABHniu)bkgFa#xt{IfMrzj);{S?5OJ3)6{GK_+LG7!hwTP{) zei-Vfqqo{9w_>!fv7V%qjGa9G9nQ4uP-z++)6&E=Kp#`GT8r zd_b*cMOVUxHo7(Ise{B&Bk`l3gz(nySf4cLZ2YgjgZ=9YPWi}%+x~IQaAFL_a?%vy z&O~B`tgVS&>IACK>%satLjaKVz)Kfv%1tKR@hx^<1ikZ!VzFFN@@l9|@W*=FvR>&^ ze@JAZ#_tbSH$KHO8-c63J}4PW^rYsFhaL(mJ}bCX^%;Hws0)jFId%}W=#59tOJxyd z)l_RQ!}{29aAuM8NnZV!>GjX=9wh>Nn8W_65=mlm0bqWySDqWNZ0~{CGT^oz^%)2x z1!4i=VPo+;!2r-Vk#3|rT_l91k(3bW64;;a zR(e6|IHzXL<(co|oq67Qzssk9H;o4YLeeDT;o|=d9U_20gjNKW9G4ik5J@9XfTWTB zi^V}mVDtZ}fG7eau=Fo31R)6_M5O<{A_L;$6(R|4Ku7{C=qVBKfAHVuECh4{pAT9d z+HY+ukCdi|lYeY=lN^fR;o~5BUl1S(1~IU@opAU{CswZbJcqN!uwA6_^<)(p9RtCY zYg~~?Ev1^i(ttc%bu2J?YeH3utxZIHv|{_zWAd#^Iyy#ghDh4?YiW;)W^1sUz*85^ z$Oyq)pnnQy*O9{Hz>cY5lsAuQ3iBt2&D3>|-f2N}O$PX`FyEZPbP6FR;YkE5>Z#1_ zci>x%jKTS(!NsT6TC-XV44gTASvZ$YK@l$dLArTWY(=2)cqvhK{!fg`)yl%F7F$h= zA^!E%0aMB%<_+>@70#H`ti4}#Hl|wPmyzq950YY|z>CXfIrbGHzwmESt@blh``o2v2H|mXc!|^CNe9fp`o%(Z zbl>ac_;Ry16mMqgY8ezoe`0EOJDmNIx&WC_b)Zeh>Ghx|)kEy-c%{X6zg+3aLK=p2 zeHqjnQ!iK;#CZC1j>N=k0H;LCFU<8jm0OT8_^4KeXujP*K=uCXe!r7zLfCO1IHu}O__9dPkH##IIwr({iLY+#2vF^i7HQY;T0e~ z25NU6e*u2S8$hP%!R*k?`O&#l?71S52owmb2_qEVSCnh~ARP*yHgcp;E4%FZZT1fx zG{XU5|7KLYrrZZQm*RNx484tyJwE(orW<}?h}X9XE?yW_RPrKM3M+^54KnRGB>d#+ z_vlU1bhw5ecbCV|XxD)Th~1vTBtntwggGX)IW8D}t6lGM{1265rihh`7{E{`ZUm8@ z0Zd9G^_}SR{7-u+;D^1P{S|0e-~gCvDT7ejB#p%I57aqtu9x_ z&mkvw3DSf6rb}xZoFxm5JXf)6 zmp{H*VvTkSDUNVP4*2L<5y|ga*ndtU$y-^zseEr7QD6MAE{Vum4+>{*;@!W-*WGjW zx>$~h(rR98X1zqw|GMucK1?@`Z9yAorFG4vSD;7UxAIf~a;BeWQue%(=fpb{h8ue&K!|n5&N`)V~_HSpZz?q+Uw=M}?btmMLfPzg= zqVkXMo%*Q@d*ae>BdF9p&ANmZ7y|rv*GRsESc^_(J(lHT((T0imM={cGW43j7Bl0Y z-8wd|JwA6A9MTKVM%+9Su({p0;th({bb=QsNM%qGdCoG;!G`Y;!2yjD=dTWPy|GXE zvLfPKt$CYjS!c&c0y2zksRr6zI?s}Rax3kx<-09c)4_DLqd60~m+5vA4X2a`U~xWm zEgn2deIK(H8?*`Q7W-U|dG317&opxk$CT4QuVBYY@L1Q65gg?gWH&b#shs!Ir4)PL zS@tg9zP3pkdgLROdC@Tw&-EkZHJ`OCfD+`G2{`yxr(US3w?U#hBL)9HAws#`9$FZs z-?M=#sz5BZb!)r{j484iDnqpS)-j||ZT0|$Zyicp)0J7m<$MzORf>eOm494mS8VLm z!-ozIUTMv=BH&iL>eX1gSQP++ctK=h=$in`goMPSx@n=g>&~PZRUgwW0s=#~y75PQ zCxZB31P(NCGK7)IHM1wZ5Xy1xf9@t|^=2l!yv2CV?qB6&*)~4Uyzh}GN{m_sGqGsm z{9XJst;x>!{Nc-05_O(wV~?C+yV%R`CbudwSG*(g2o4xAuXf2KRpN0{@V1APIU}ZZ z&jU)TU0m&l5Q^H|&*;{kG6ad?gGsun-p~(jexHa`spY7z zT9va#h|XxQz-+>O;11%flZl}D%H7p&|C{b+RY?WN(KPkf&9XYw3T|k^$7i!rc>qj< zNFoZ6l522`|L?{#HiGAFnHH21Eoau=WyOBx)8BS~FS0PU*#cevobTWq+2Q+VEsbBc z)Jf*1_6nJ)v~yXuU6?`&+IAX*HvK=fl4zVT=wBjT>sc1=vIkznyrt{iLO11cz?#Lmxc2nEEpzvuUSYUcO{*Ge^_JLa|8#4Uc5l~ zmukuO@3ESQDlDTKr+#9J*)mP4j3idPZ6R{6;j+#E=B`>9en+&y)tNX)P72AMOe|QD z`+ds&$Lwo`y|$Him|4b(35YmYX%ULu5Fw6LiR@dg9C=|~xrD@x8+p9#;$&deMe~AJ z9KIZzW!^L?e{&bJU@kt>JCW~X&xlk0fM9`YWqR5zG?SF)m#6ZPN|61pDkY`FQyod= z5ig3Aj2qvx!R7?ytujqHX%pPPF#M+kLy^Rf)1-%wpU*+wr5L@e?1Kfz_L%=h5706< za4am~=`txRzR#QYekVy(*peO9ib_nqh(<$CHAnyLIp)MYvtOi1ssa|XE&4kFSila_M``(xW}klueD)9 zz_n4;qf)`f*0|R-kJxzU&g{xSCc_wqGPA6rl*q%G`mG- zqIlo*rmX0m;c&0Du>iNK)QiFI)9%pshptM4sqmVkk15P+ z_)oRdmn!=qyx!VA>naeF;O)bLid>u{y$eTHI|M3kb8)vEv{&zQS@M zfJlgcJ+zOV6P)0I*x*q%M{1$H^~I9U1d$^_2D7}Yt(7Z`*5i_98HV%5*v_@fvyR;) z9UsCf{ltxrt5-^){`j-c5WHhL7@u99_}4DpCN8O0nL3y6(cVvs1e@O|sGr_D8LyCm zM1Fhbfc*XK4k3Qoc<}qhB>q`{!u)@e%!Z&(#Dw8Z^6|Hbn;FM_QQXyjik!jVi&!Y6zSS2_SVpjDHPGjr)EHw(gWD8LujA3O@5bphMtWM&3;1hGjj_VSMl<#j|86u8wib;w$ 
z%ZP_wPfTWMbEf?SkV%B!Q(d%Gi`i(a`6X+B_*eQ-rFe4Oz;e}2^^#`TQH18SiFJ#Z zHB1V~3pVPU*^s@^r3Mdnrjo)|h)hQ&#U@-{}{v-7u?U>U0e-R9usOy!AGZ zwTJ>Y;Mh1&Xt7HzR?NC@L29(j(X?BXA_&jv5(U zB^sIhbB=Mv1>oezQ=tW!cYjLw6?;XdFApn6-=l5h(vHqiMr)Os7Q*MG%z>dPb@l0I zLw4lix(b6m60k1K_PIZR*{BU6uTyED6POkii7M@M-`@q&M4KeC!{(Z*j*G_;I|vf9 z`@t!a67fEvyU?g~(kI#Ue2~blM^P!)FDeslI|oPNo#Q75q?c}7a#>Bk8=vcgX;xMi7@# z>!_lYnX{*i1V7i=`-?X!^>?1wzX+lMu;q?j^sh5JvdwtqQVlC1F{1#&b-}Z;QMd_< znmC?9TOYAnQ}+E`Wb_SZWJfTQ+wV6K3$#h%Gxo;1*u5j;i+|D>pufiNNa*z@qn}rP zT#y+C{d(l)yWsgQ?qR~$aoY+hZGZUOMOgJE8ce|bg9bQf?}jbzM*$8&B)#>9VWHa^QKE+bFqXYIF8#+-oUQC`6wG$=&!QVrvynNQn z6+mUvaQ-l`aCfr4B$irtY}67LgrAdvWrQK}&9&RRAkBVKlLOpj`+chjn9^t-hPONH43< z#W)N(1>#=sF@le1uLY{?J2u%mL3%NeFvYaR3mBLT)cM=4gt#FTqzK{!0f-0%De&=_ z32_MM>cPH@V?z&p!Q9?8YBl=PJ#@fp{(jgB34Vx5(yn?nmRnh~E)iJ|4_?hUh^+z*z(QPJn2eUjr=@+7ONTJ;KmPFkD$t_3l^R4itZz z$*}wUm#ZKU1b~LoLH@TD1f>O_dFY_FkvD-(G+>|@7=G0CAr&ZLM(Og~$y@|ojR?Wh z5WJ9XK}?^W1bLX&NlVi`6lyEY%2qBXF(*pql|TXN@YF1ZT~p#|L|2H}GT_;Ph-#q`_|kw=gwb)$R)i5lCYiKjl zbd4!%JX>_0VXEH8xFjAfa0sv0Q36uq3oWV{!KpVKj?2v_>OO9j4SL25*EMdSH-ZK z+{O@U-}9k1VqB8Hst9O9g?l}0H#zg@SZwVr-A+0R1s|Lc*kqLyJprA{rYRR!~td)&0Px|rMSz?Zj zYMa4-7eU{V7G!l$gk54!A2>nD>WNKXYTC;ExP;vvC*-VO~ANCSoP0)}WDKTUF>5F7? zNs~loQ$Ozwrr}y~60uu+;U~OOB|}Lt5W2Ux3XWZVoq7KA9p(w=DnNp78Sj&>k3~6g zjJCT?ChX_*q9nY-+uS2V&$-ylk;@56$~f-oAMIc_{`}-82^e zORaLjd3AA9*Pp`#wt6V;t>T_o3eKsy9E3a*2W{!EJO5W4wCP|xZOR!YoYmiSjQ@8E zi~$Hb3uVvX)&CO3f-{Z6xXYmStI(gMq`oNA{}jd0VaSTtIqOLD(09VOX6oZS+jlNUV;{llXHThPo#+4N`eROBnRl#Vi7b?q-zFw4 z`532S^DuC>Ds)s(eG$zX3LV*n8FyP%(|yQIQw#Af$;Up=;-fquG}cF?=uQh{Y>khP zkC?M*_TGq8lw-yMk9)B-qj6Trs83OgdX8lMeDPSP_Sf$93Aywm@ZbSmCwsSw8vwZ{ zJao=dN!%^46m%giV{dkB#(QP;ft-{2sBGlDH!Tkr6(YM zpx~Z8t<*8T1Fs{hwO15}LI1Ym)-?pO@cc9aHr05n*6CTv>o^Kg>NtQd+f?$Xz>&jO ziJQe__b7XX!0ZRH7H>3Dij%Yk*LqzcJQxc1G@FAjwRiN_7`IBII+jtQTzT&NUqz9^QzHj|Rbw0<}t> z_z~46n9A-mf8+Y(q@+RT&oIM0STlNSYp%)Ks>bDe^sw;tiQ4orp7Sdg`rc+v+2MjD zdZw6yWW;E@h0IN3oBE^>V^=Cm;b9lJuir@yzPzn;)E6=!8A>`TxE)#Ets;X;bKPxC zgRy29(r-E0hH1veM^AfH*+h#v35%*K&vApYJ;Kr2Ow6dzAh~b_Z=@n5gcpgF9->gY zTgcQ}4%v4`yNub0m(`|DdLq2T z1(dtVBCDLQp2G`pFKF>Dag&Rp;3yH!+J)e(qE~nk-uqt~>Y8I&X7e)N{W+PN9=2fh zgGC?GNI}z3rENV`gMkOaIUb49)4+HG}$CNVr7vcyfH8 z7iL#s=F9jr_-x`dn^~>03|W7=^eFIfwA62)XWTU~bk7^3jOP3$S|}?9#>)V305}0~ zfCm5!THtrYhPeP{4T5rBi8paJD7dy=Fp;JnG0}iUp;gcrG}EsS3k0MEfG_f9^-Bj{ r3=QjCh}WU@uze|v3vm|9b3n;kj)b?Anzw)Le6qpl#FoFq`0s#Xsf(cp%2`Yw2hW8Bt2LYgh2}=Zm2}dx32}3Y~1K$P- zDuzgg_YDCD2B3li&@h4n%mM)bFoFZc1_>&LNQUZju4r)&f z&2dtI$@s64Id_;%s=O&W5h}x?WkJvOc<>jpS7uHW1VMuRb^k0eHf zZI%BJ=22B}?XSz$t9QtR3TX8kfY&7)N<3QM*;pxmG9qG4|5pM$XOKPNVbpIcn{7rJ zn4Oq9((#jSa%^HRd-{g`3aOqn&SW^EXyD~K7PW|^id)iRt$${V|FeKV0Nh~f;J2B_ z2sAzq)`y+he!)}jQtIeLPX7O0oN|lX>a0r0YzdBRO*a-j&nb zyXe*fE(}%+&6Tn- zRU15FI8t~Fjj2mtYxZIUC&|GyK9(YrFr(GzHQKBmzOaBt%T20l3WskV1zy~LvQ6F^ z+f@zH5oSP3&@h4pTm}g$hDe6@4FLxMpn?TW1cC)gFoFd|FoFd^1`8^NNQUM$Gz3Mz(3hW8Bt3;_c$4g?6R8xnly_&N##0tf&Ef&|END#(^(cD0wB z0QDvJ>-5rs32x=svnL?-8wd4&Jubl}`!i5t8A}&1Tz*7PSNfY=^#XR>qjpyvNR6$i zU8MenH#rSRFSa}#?WIVUNf=atDiRX^Xx^RiH6#>v5EtE`$r*CX8~kD;G>;=dSdcLI zDt2}E%dYStwbwXXtgE8Swx`l@P_`tDl4R`@3sK6`Ju|H&j1cJV{*^<2P#iQRde{r! zUKhzOns;QBE4_-mpz{(QCn4p!VIaZ=e%+N34>IAwC$c*#cC0ZZiore9`sB#UBQ%!t zKG<8b! 
zYE5xut8zNsW_gKP=85jrU_pLd*DFEv)2CI?G1_Lg;ctPgHJDp4j0>~c8(NX}?~C&=%^!oU=I!(pg9E9B%+7Mhk_P8i{n;%djK z5SUSvExm~}is}QuPS$+vJD#t4kr#pzx+%P4zHyYXIHaL}4N^q{U-B#4CX0rouawn; zj^_@q<~zz9dOPH8K=H4hgQy^65@Q8>3Q`%n`x*o^H{n6THxga`U>vP^MTb^&^x7jp z%SqIWrZ=lUi4bOFZg*&NcJz0-&x9M4XA_X~c#A8tu2QvGzdfp`Ls*OJ1l$lke=0y~ zagn;PL^8>L4lH&Y3jUXowbU|Fe^b=xof1K_hLHusjPoD{$ntru>II$@*dDN33LYQ# zi@OKg1+DS7S7!~SXJ-ywByBe5n=S|mG{ABI4ZoLW(& zC+k|3yRGyK*2xjL4>2*ATE=2q)HlxbLj5A71}n^eBd#N`?68gM)|66u+tu9zFLJxg zL{0=N?Dd@gaxPlM@m*HCxQp}GGbnI+Z0D+}P=P{LOT5(06-nt<$_XSSsDXPmlMwe( zvkp=6DZUR7t+={KKG}EF$2NBB9Hj}hfd-{X}x+;PwjJ7@ZO5#Az0t zK#EVP<=Ae<#839T!%t~~#qGXi7s?jwO0i_d`z;HWXXY09^Z53 zk%@d#bj|nOn@rqtp_=l+|DaYw6vp6Ov!vmXPm`$$E=N1-HpRG+Y?!9{ z^IR|uGE%kU3AWmgZpxk=pg1X4c>ePya$*dQZ0N$C02) z2--1-g9v@W`qmgbLB`{ z{F4(it~?rrEkExOg4>3w1CF{Z6c3|xGdJ6@uY~zg+nw$D?6Yi&(j+S3))H^=Tvj<| zeD1yMs4(MlByN)P{LayyJU%9Q#I{kPX`MP&yjCOMZJ8@v_wc#ao?MFP!D@G3ajo|1 z)X)A=w?D@mLz%Ee#G11#Nm-WP6-)POGUt-b3!biAZ?k9n_#z(4%~xgdP&{&z0Kl`P zU81oWh>Jvxt(4UY4uvM2tG8HwOU8@6ENVumdt|n!`wFRZuX1<#h269b@;@zo5OM%F zQH@j1^NKd~zL39GoD8wKgCJya>C^F7IPrDtLG`19QwB}Ms*RBe!Lt-iNr1Hcbx)}j zcO4J=>{5}UTqmspx3q-Y^8k9@?UbXmTixyp0>i!v$@?sf%{J5uQToHa>DfvzWT3yU zkwX7j`qILz_J^|Lhed}W1)mDYQ`SG?%a6RGD`)_vyF^A*T4jU7+99?TyygWFvgusx z#ISQ5AG~ z!p~}Dj}jW^QrXVzyBSjfciNH}i59E9mkX>a8h`gk@8MIQfE%iT-s)N5rxs9B(Fn7T zaHL`QQ?FFJPiIx~I(oY*n5liN4haoHf5F;Y2NLfe8akF?BPeN8J8B3Gj}!%5=1=+8 z;2h$NveZlk^J~Z6HPR9fr@iZ^Du<5tF4CC09OJQf{U#6jMco~whtY=VV3G9$KUaUm zTh;)81psR6e8um=$iWVVy&~Y2m5At-xhp|Q5vZvpV)>jy>~^EzutSQDE55a7$GSe@ zMtJG2Pj^+DqE^g`2-So))MDIP1wsx~U|_c)Uqa)+uYcY7zv2L=f^guZ6P$2T&mpJ& z?-V!;06YnmPN4jMiK2Tmc;91;_d<4$W|HbpGyne-g(E;1DuZzpYqt4N{1q}P2*QDS z7;i>Y+q#3q{La^tSivO4t^0*(>}=EBi)(K2FvCksPf~3}?}WeZx9Fr-NQGX5*Q*Rt z19y&}X1C6&_8UYR>{T?$JZ^24d^^kggY{dkW800qa(UJp;aSPM*BjUBvIBUGQyQ2! zqxiwV%l^x$*`HUIfMBdKq<=8l<})u3@_UAfZi9oap4s0N4xvXpTYbE{YPQ$pUu`Ql zErz_`?UHrHieDaCZO@Vt%$b+5v3wKPDrfY${FG{fTu-r``w}VR@Bx0r5wRtCJ-(*d z;0diE@$$0!SG^9}3%l~-K1~{dtvlzkgd{=sLV`V?pAeKjlHb1x@=S4;)izy`S=BR7mKDDfS>|z!QMv7&$K*#+nbzu$!8V z47%5q{RVHV2O0$zHXg>p3iE~6#k3#yz>uizp(wS_=2>|Y6vp0FyGV+|MI`x6blS|A zsr?@kI;#5i>W@+cRFB`06iQ;g@Z2MZ$*Hkh)Hdmw<85jk+t#BsCOSjpZc3bOxD|0*- zwgENq_zRJrJaac!cfY( z<*I^Q7uw$XiUgfIR>8Bs>M$N%q{P&;EX_|)f}cTPCgY*~s-feyaKX**hh#I;-Kzsa-!z%K9_bD~*(w_tJ8QV)HRi245KhUJqew0pmviIrKFrfw-T<<9;hbY^6?%+RbB!a~^ z>xG;pdf(bu=pQPOn1?$y>VRwZ;g_Y{a z{*1^BDTj2hXzJvV^#EPp)Gz=5 delta 2438 zcmV;133>Le6qpl#FoFq`0s#Xsf(cp%2`Yw2hW8Bt2LYgh2}=Zm2}dx32}3Y~1K$P- zDuzgg_YDCD2B3li&@h4n%mM)bFoFZc1_>&LNQU{4$JmO{9AnAxe z%pE9Du}mEHoN27Q=W*6s6Ro{so@{L#X}!H4N;*$z_hJAXG!zyA@0GM4$^|As$Ba^! 
z;XSa!%p$x+$x0J^$|SYjEn5BlS5W6Ibtw7Ok~>U*Iewul+*yTwq+)-Ot$S$rQzT2W z701@Qzw1GNSYyMHY=J@{3u%8ZuNI>%*65q#tD!VMqfj%w;ro)L+Gn8R?ye&)tsK|_ zy=t>GI}}%Rq{_x-`1rrkVSk7{n5=Q9eSEs}EL=|M<11ILj1-1u>KAY>#2Ap4Y!6%I z%V0xkxPC(9VOs9HDHGaDktpKK=DZeuT^$OBDZ5dB`j%-X(~EV@mj!hj3n#21)H0A1 znRnXL|LitM%vVg~R7o;k1E76rL@xvVA{A>ZquL$WENfz0c?``|y=RiACPC5i zKVk-dm&Xe*EGAvC3eJkEAn`{d-q7(=Kw<#f8wm?lnE19;?y2@o*_8>zTsE?CVcIOx zC>qd#F%Y*^{nYOMLT^`9maETFAz7|ML3&2I7-%>IxP(Bvg|`lWfY5dFNQNs@Dw)w> ze#3gH&U;Yz+hfH8$JT<7Kqpv8H5O+#kDw)$7LqFltH8Pn~Jd~N4}(3mI2a*oF#tp(v=g;n!4;Ew=PriKU& zb0{20!r08ZwA_{tI!@S9ycsk2XlL)v(W}_ZJ~t1cgT#}OkAHLkULrdU*1eWjebgy` z5LLb;y7L9I2jo4tN(7YdnCJxVL=B>4X+z?a-iN?Ajfj7AOa7 z*6#rD#-HuK&|l$33)p@*?60<6#lQ+N&8%E-j{*tA?tON3N&ku<738Grt^wSc$mC4G z$cD;p@mWytimb?L$Mam^s0A!jC(~PhRLuq)dZ1Sl9be$C6b6Z&zzw;mNmO{8RhQ@I z>h>*t{n|$s5}tXeH5&+Vui}_dVe)WLT`ijAJ#EatZwKdQ!IuV2%p+fcga28mcn|1c zyPb@{!B0)C;2C%Ix7$(E6`1EtlK$|fO<#p=NGTXuCPTDfy$zID@gx3eZI8HrxS97o zobON$DDHXY)i8nuTm}g$hDe6@4FLxMpn?TW1cC)gFoFd|FoFd^1`8^NNQUM$Gz3Mz(3hW8Bt3;_c$4g?6XOYnNQTJuE$0tf&Ef&|DK32U6pmoc+) z3GEu~2Kp!%Y?s5n$^hS7=@sy3N5NC(DIt;j(9g}Ylg={C1fRmvr~%${VYBaZo_MbVfD99!bE7NzR% z5Kd#Wou{d&?>gMzVCJS?oH=@0c6`BV_q@Uvq`8)!<=KaC;HD-_2qpG#Dw)(C zA9;5urF@31Xv9%=8qQ5M7I1`N1*Xt@qpY(S(kt3EXW>@8oz@G|29aC;3T5RJdB($h z5KaloUq8Ia!F}8BkRq&qYmN>7hdRM3xDl4jC?(q_5ofs8^rk_W5SI4E?ODyXzG?3fn0_m2KfskgZe~v_Qa5#s>aQVAu@m z6Jj?F5LVIZxHZxE1dc^+MlznCvV@a{L*3<(B*DG#kQOmhXiU(%39 zWcN1AX%|d9m~IT1-tz^m0>~f64u^_YFW?g|<;1;YZ|P7c;5EIXAg#*yxT_g3M5z~S zvq}PEn<&N$crVW7dhKjtO~hGWx67u?7Jvry@81Y%rf8xiC+#V$%mmUn4-Q|lFmutB zla#6i#+pZmJyCCep)E~0d)yzfW2alUl}en_4k}PZSCM>{OTtwN5~ZVj*_Lp;_`0z2 z(Xh>7U*KIFU0un1F0N!EAml{ z?|#Off5UFyQr);NU~pE!y%9H(KmZnfe61!fjb5C=BWY)Ud=_c2cOgzjDx89elDYdy zfy6zX6p$*%%8j`1`gN%gS^+fVvuguBpZi#T7DDwn;ZA~NL$h9r3zV-t9&`rB9=SwA zTjgXpFZ-ugz!n$oz-!UpChPrPIT0ruJN78EZAUME=a8`@!`~k&M95{U!0QmhfaDOL z1YuV)^Oj7|uA?|4TCt`M0J6hG+z3gOgxv!^ZzItdP5OxphuAJdyDH@@PqoRB<UAr>3A& zo{q{TC6&iE%c02Z_#^Uy#e!gUDl}S5KG@rlTa&2?E=L($fBS6PWQ#G?>=~AZ65$LL z|2{A=Fd;Ar1_dh)0|FWa00b20bFT$JkH{yYoH6LjuRN%@20#4-2;8v43FM87R00AB E018}lU;qFB diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/README.md b/libs/ssl-config/src/test/resources/certs/pem-utils/README.md index 28602ac097f78..576b34317bd0a 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/README.md +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/README.md @@ -3,14 +3,15 @@ The certificates in this directory have been generated using the following openssl configuration and commands. -OpenSSL Configuration File is located in this directory as -`openssl_config.cnf`. +OpenSSL Configuration File is `openssl_config.cnf`. The `alt_names` section provides the Subject Alternative Names for each certificate. This is necessary for testing with hostname verification enabled. - openssl req -new -x509 -extensions v3_req -out .cert -keyout .pem -days 1460 -config config.cnf +```bash + openssl req -new -x509 -extensions v3_req -out .cert -keyout .pem -days 1460 -config openssl_config.cnf +``` When prompted the password is always set to the value of <NAME>. @@ -18,13 +19,18 @@ Because we intend to import these certificates into a Java Keystore file, they certificate and private key must be combined in a PKCS12 certificate. + +```bash openssl pkcs12 -export -name -in .cert -inkey .pem -out .p12 +``` # Creating the Keystore We need to create a keystore from the created PKCS12 certificate. 
+```bash keytool -importkeystore -destkeystore .jks -srckeystore .p12 -srcstoretype pkcs12 -alias +``` The keystore is now created and has the private/public key pair. You can import additional trusted certificates using `keytool -importcert`. When @@ -35,91 +41,141 @@ keystore if necessary. `testnode-unprotected.pem` is simply the decrypted `testnode.pem` +```bash openssl rsa -in testnode.pem -out testnode-unprotected.pem +``` `rsa_key_pkcs8_plain.pem` is the same plaintext key encoded in `PKCS#8` +```bash openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode-unprotected.pem -out rsa_key_pkcs8_plain.pem -nocrypt +``` `testnode-aes{128,192,256}.pem` is the testnode.pem private key, encrypted with `AES-128`, `AES-192` and `AES-256` respectively, encoded in `PKCS#1` +```bash openssl rsa -aes128 -in testnode-unprotected.pem -out testnode-aes128.pem openssl rsa -aes192 -in testnode-unprotected.pem -out testnode-aes192.pem openssl rsa -aes256 -in testnode-unprotected.pem -out testnode-aes256.pem +``` -Adding `DSA` and `EC` Keys to the Keystore +# Adding `DSA` and `EC` Keys to the Keystore +```bash keytool -genkeypair -keyalg DSA -alias testnode_dsa -keystore testnode.jks -storepass testnode \ - -keypass testnode -validity 10000 -keysize 1024 -dname "CN=OpenSearch Test Node" \ + -keypass testnode -validity 10000 -keysize 2048 -dname "CN=OpenSearch Test Node" \ -ext SAN=dns:localhost,dns:localhost.localdomain,dns:localhost4,dns:localhost4.localdomain4,dns:localhost6,dns:localhost6.localdomain6,ip:127.0.0.1,ip:0:0:0:0:0:0:0:1 keytool -genkeypair -keyalg EC -alias testnode_ec -keystore testnode.jks -storepass testnode \ - -keypass testnode -validity 10000 -keysize 256 -dname "CN=OpenSearch Test Node" \ + -keypass testnode -validity 10000 -groupname secp256r1 -dname "CN=OpenSearch Test Node" \ -ext SAN=dns:localhost,dns:localhost.localdomain,dns:localhost4,dns:localhost4.localdomain4,dns:localhost6,dns:localhost6.localdomain6,ip:127.0.0.1,ip:0:0:0:0:0:0:0:1 +``` -Exporting the `DSA` and `EC` private keys from the keystore +# Export the `DSA` and `EC` private keys from `JKS` to `PKCS#12` +```bash keytool -importkeystore -srckeystore testnode.jks -destkeystore dsa.p12 -deststoretype PKCS12 \ -srcalias testnode_dsa -deststorepass testnode -destkeypass testnode + keytool -importkeystore -srckeystore testnode.jks -destkeystore ec.p12 -deststoretype PKCS12 \ + -srcalias testnode_ec -deststorepass testnode -destkeypass testnode +``` + +# Export the `DSA` and `EC` private keys from `PKCS#12` keystore into `PKCS#8` format + +```bash openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outform pem \ -out dsa_key_pkcs8_plain.pem - keytool -importkeystore -srckeystore testnode.jks -destkeystore ec.p12 -deststoretype PKCS12 \ - -srcalias testnode_ec -deststorepass testnode -destkeypass testnode + openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl pkcs8 -topk8 -outform pem \ + -out dsa_key_pkcs8_encrypted.pem openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outform pem \ - -out ec_key_pkcs8_plain.pem + -out ec_key_pkcs8_plain.pem -Create `PKCS#8` encrypted key from the encrypted `PKCS#1` encoded -`testnode.pem` + openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl pkcs8 -topk8 -outform pem \ + -out ec_key_pkcs8_encrypted.pem +``` - openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode.pem -out key_pkcs8_encrypted.pem +# Export the `DSA` and `EC` private keys from `PKCS#12` keystore into `PKCS#1` format - ssh-keygen -t ed25519 -f key_unsupported.pem 
+```bash + openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl dsa -out dsa_key_openssl_plain.pem + + openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl dsa -des3 -out dsa_key_openssl_encrypted.pem -Convert `prime256v1-key-noparam.pem` to `PKCS#8` format + openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl ec -out ec_key_openssl_plain.pem - openssl pkcs8 -topk8 -in prime256v1-key-noparam.pem -nocrypt -out prime256v1-key-noparam-pkcs8.pem + openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl ec -des3 -out ec_key_openssl_encrypted.pem +``` -Generate the keys and self-signed certificates in `nodes/self/` : +# Create SSH key +```bash + ssh-keygen -t ed25519 -f key_unsupported.pem +``` +# Generate the keys and self-signed certificates in `nodes/self/` : +```bash openssl req -newkey rsa:2048 -keyout n1.c1.key -x509 -days 3650 -subj "/CN=n1.c1" -reqexts SAN \ -extensions SAN -config <(cat /etc/ssl/openssl.cnf \ <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node1.cluster1")) -out n1.c1.crt +``` -Create a `CA` keypair for testing - +# Create a `CA` keypair for testing +```bash openssl req -newkey rsa:2048 -nodes -keyout ca.key -x509 -subj "/CN=certAuth" -days 10000 -out ca.crt +``` -Generate Certificates signed with our CA for testing - -  openssl req -new -newkey rsa:2048 -keyout n2.c2.key -reqexts SAN -extensions SAN \ +# Generate Certificates signed with our CA for testing +```bash + openssl req -new -newkey rsa:2048 -keyout n2.c2.key -reqexts SAN -extensions SAN \ -config <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node2.cluster2"))\ -out n2.c2.csr - openssl x509 -req -in n2.c2.csr -extensions SAN -CA ca.crt -CAkey ca.key -CAcreateserial \ -extfile <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node2.cluster2"))\ -out n2.c2.crt -days 10000 +``` # Generate EC keys using various curves for testing - +```bash openssl ecparam -list_curves +``` will list all the available curves in a given system. 
For the purposes of the tests here, the following curves were used to generate ec keys named accordingly: - +```bash openssl ecparam -name secp256r1 -genkey -out private_secp256r1.pem openssl ecparam -name secp384r1 -genkey -out private_secp384r1.pem openssl ecparam -name secp521r1 -genkey -out private_secp521r1.pem +``` and the respective certificates - +```bash openssl req -x509 -extensions v3_req -key private_secp256r1.pem -out certificate_secp256r1.pem -days 1460 -config openssl_config.cnf openssl req -x509 -extensions v3_req -key private_secp384r1.pem -out certificate_secp384r1.pem -days 1460 -config openssl_config.cnf openssl req -x509 -extensions v3_req -key private_secp521r1.pem -out certificate_secp521r1.pem -days 1460 -config openssl_config.cnf +``` + +# Generate encrypted keys with `PBKDF2` standard + +## RSA PKCS#8 +```bash + openssl genrsa -out key-temp.pem 2048 + openssl pkcs8 -in key-temp.pem -topk8 -v2 aes-256-cbc -v2prf hmacWithSHA512 -out key_PKCS8_enc_pbkdf2.pem +``` + +## DSA +```bash + openssl genpkey -genparam -algorithm DSA -out param_temp.pem -pkeyopt pbits:2048 -pkeyopt qbits:224 -pkeyopt digest:SHA256 -pkeyopt gindex:1 -text + openssl genpkey -paramfile param_temp.pem -out key_DSA_enc_pbkdf2.pem -aes256 -pass stdin +``` + +## EC +```bash + openssl genpkey -algorithm EC -out key_EC_enc_pbkdf2.pem -pkeyopt ec_paramgen_curve:secp384r1 -pkeyopt ec_param_enc:named_curve -pass stdin +``` diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_encrypted.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_encrypted.pem index a251de23f4879..6dafbae6a7785 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_encrypted.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_encrypted.pem @@ -1,15 +1,23 @@ -----BEGIN DSA PRIVATE KEY----- Proc-Type: 4,ENCRYPTED -DEK-Info: DES-EDE3-CBC,BE9A0B63873F6B7A +DEK-Info: DES-EDE3-CBC,0DE7DF3D64FBE0C5 -lGSpJkwN0J9p+2Wm58706EYz6mmjgz7okjMtsR87GMIiK/wVwjKmyUa73QTVVs15 -N/EOySftBk3VUSPx9G1ZMxKpp3l/hvkIcsDDfCPAZFqwdQQJ8BEeF9jDd5ZoI6Yz -Yus1+X8A1OpX1O7PCZ08e2fLeVuEWg62/JQcNukuvL7AKm+qa1sda5/ktquv2eMZ -nbTiOE3Xe+uDsgABQdy1h4EsMEaMdE6QrWdxLGWDGcdzSzfltvnhmmsK2CQsV4e1 -huQeb8ylShJuIr+mgtKgUlIlJwSd7ka8hIdmGt1LO9+NZOPUGN04daQkETtfwsmu -YIYkh66CuLbT4nZny64Spa7AeINSmf9GA72/QtRSo3M7Khlw/95Lz24iKAy7/Lbt -AKYenSQeJtlNgWzPcDIeUrIzXXmAXHN5YGMg/7X0h7EGu5BxYbLydkBRvSkV9gzU -Ms6JD5aON10DQhjIUwUcBnhSnwPPpIVa2xf9mqytkcg+zDgr57ygZ9n4D+iv4jiC -ZJuFCFrgeqHrCEKRphWRckyhPo25ix9XXv7FmUw8jxb/3uTk93CS4Wv5LK4JkK6Z -AyF99S2kDqsE1u71qHJU2w== +sKlL1ZyhrDo/7CF2bVHPNJMZqbbfQ55ZAB+T2x63j1ssu3c9arMVFiNTm3gl29DX +6PtYopDglgZhK7YYLck5batMjSqwpl+lm6MgTDqzgZAMcCPl5KJd0ScuCP9nw/yE +uAzBBmhhHrxUtyGLZWX/RNq+pIv3rMs2MGrLjidJW4VkIXczEjoVbayzHqdOHzPa +GVeGfm68ykFO94KcJZWsGFQMCtm7DyYLNusC4P6O9hpYNsK09kqwqMiQFKUstIO7 +lyemCDCQa2wikO22wum6PgrSosIU2CoSo7AYgb7zpGOJdtQ15F6mC4+NOFY4Xk3d +N5ZB7vc/1Y4vqKBDMF9DANtK8LjYt13p9mVv2ZDH48qM9EadCtN26PeARlqwFBJv +hrVyVm7GOywY43XTVQqrF8MVDggunyGubEH1Endhh0PmVCL+hG9djQWLuCIUGON4 +6/2mA7dbyxzRi5qgC00BRjrpmti+vddVArFCoKvaOlELaGR1mSkpeKdfuI+WeZ6u +/GJ/tvG/4yWKoRxxsZ5JFj1njMVhgsgicaHV3r+jF4SjDkjDJ4TXcU1QgP0jnNEd +5O2Yn85MuUg99T6r/3lgW8WLelrRKuGocYDMi/huWaBwhA+FsB/5eCm0nwWgNpw5 +Z/aylu/XHqx9pE5veAzXajGkg0z3MbBp2Ig+q0XWxznbQZnMZSuLMlnIbmo77v86 +pAoLumTBaG8unmwPc3WDvyEC4/znx7aJwcLqMLwnDB7+qtNqG6OLzskARQ5+TqXk +6SFn2JX2EhJZ4X3yKqDs93haSlQlOxszEvoz9J94xtHdeTQ1EE8dFwSBU4UrCxmP +kTTTB/p9IfRXyXn7Bp00EfDPc+0+I0pZrnQLA3CLP8oxGngh7RBGE1BtjNU9mxCu 
+P2dn2lQBh1bB5u5Ggm7T87BEpWmTMaU7wrp5drrbzuS91OQf9UGRxVt0UZwApNBh +tqabqXourZ4NOERuy8WL9wFG8IAymSAEd7noVCXcv25SxBng2tyo9nI272Ufq1JM +ymn1Bf2aDDJsb/n17dAcfxwbnx6GdB0jEIoUaWMkSWh8FfjLpE29uUraVBBYTmd8 +TlkFmodWG8ctHpwDXSmQ80lcKC7lZ1M3NCjKwdKpcM+q8HG3VuFmrg== -----END DSA PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain.pem index a64642fc9ab0c..40290ff1c2b1d 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain.pem @@ -1,12 +1,20 @@ -----BEGIN DSA PRIVATE KEY----- -MIIBuwIBAAKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADDHj+AtlEmaUVdQCJR -+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gEexAiwk+7qdf+t8Yb -+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/IiAxmd0UgBxwIVAJdg -UI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4V7l5lK+7+jrqgvlX -TAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozIpuE8FnqLVHyNKOCj -rh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4VrlnwaSi2ZegHtVJWQB -TDv+z0kqAoGAd0xuuUUSAXsXaQ/dp9ThBTVzdVhGk6VAcWb403uMXUyXKsnCIAST -m6bVWKjNxO1EsP3Slyd5CwbqIRUBK5NjzdQP/hHGtEIbqtYKY1VZI7T91Lk8/Dc/ -p9Vgh27bPR8Yq8wPKU3EIJzYi0Nw8AxZf10yK+5tQ6pPUa3dH6lXt5oCFF1LyfuB -qBYh7hyIsfkb+cZoQ57t +MIIDTQIBAAKCAQEAj3k12bmq6b+r7Yh6z0lRtvMuxZ47rzcY6OrElh8+/TYG50NR +qcQYMzm4CefCrhxTm6dHW4XQEa24tHmHdUmEaVysDo8UszYIKKIv+icRCj1iqZNF +NAmg/mlsRlj4S90ggZw3CaAQV7GVrc0AIz26VIS2KR+dZI74g0SGd5ec7AS0NKas +LnXpmF3iPbApL8ERjJ/6nYGB5zONt5K3MNe540lZL2gJmHIVORXqPWuLRlPGM0WP +gDsypMLg8nKQJW5OP4o7CDihxFDk4YwaKaN9316hQ95LZv8EkD7VzxYj4VjUh8YI +6X8hHNgdyiPLbjgHZfgi40K+SEwFdjk5YBzWZwIdALr2lqaFePff3uf6Z8l3x4Xv +MrIzuuWAwLzVaV0CggEAFqZcWCBIUHBOdQKjl1cEDTTaOjR4wVTU5KXALSQu4E+W +5h5L0JBKvayPN+6x4J8xgtI8kEPLZC+IAEFg7fnKCbMgdqecMqYn8kc+kYebosTn +RL0ggVRMtVuALDaNH6g+1InpTg+gaI4yQopceMR4xo0FJ7ccmjq7CwvhLERoljnn +08502xAaZaorh/ZMaCbbPscvS1WZg0u07bAvfJDppJbTpV1TW+v8RdT2GfY/Pe27 +hzklwvIk4HcxKW2oh+weR0j4fvtf3rdUhDFrIjLe5VPdrwIRKw0fAtowlzIk/ieu +2oudSyki2bqL457Z4QOmPFKBC8aIt+LtQxbh7xfb3gKCAQBVB6bce7VXrIhB9hEE +jRlAUTm/Zezsl1CfaCjr+lejlxFybg5pkNQCvPsgpELnWXWz/8TXkbzAxSA3yGB0 +LSTp7gfucdFleJrGGZ94RTaIZFslDvk5HtFaZvjvUavyY3wCbMu+T1QUtfpQMQpP +qikplvg/2mzYhh3cMpdhFqj6EQcC12gHPPA7qC2jXnvsW1qqx0wtIxbBJvCqFqmA +gnOj/FoxqpTmMsMDG+8cwkOQ3PZTv1JbqVeJGFMvfsqb05SfZlO8XzXOvTm7Wexc +IXHTUTsXb36rH4tNpBqxCc+l1LOd3vXXPtxxBXsGDV2UeDOLWnMKp+FXj77vh0bc +W3aeAhw3xacY9KJHUobKmnlsyfgPhURZXWxg0U9oSzOr -----END DSA PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain_with_params.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain_with_params.pem index 0a2ea861b9b66..a57dbe80015c6 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain_with_params.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_openssl_plain_with_params.pem @@ -5,14 +5,22 @@ fexykg9Kxe/QBfDtcj3CEJNH/xoptJQVx3hi+0BPPK8+eUXTjwkQerGMwUD7UQak xuUS/22GakHZV5G/kCc= -----END DSA PARAMETERS----- -----BEGIN DSA PRIVATE KEY----- -MIIBuwIBAAKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADDHj+AtlEmaUVdQCJR -+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gEexAiwk+7qdf+t8Yb -+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/IiAxmd0UgBxwIVAJdg -UI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4V7l5lK+7+jrqgvlX -TAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozIpuE8FnqLVHyNKOCj -rh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4VrlnwaSi2ZegHtVJWQB 
-TDv+z0kqAoGAd0xuuUUSAXsXaQ/dp9ThBTVzdVhGk6VAcWb403uMXUyXKsnCIAST -m6bVWKjNxO1EsP3Slyd5CwbqIRUBK5NjzdQP/hHGtEIbqtYKY1VZI7T91Lk8/Dc/ -p9Vgh27bPR8Yq8wPKU3EIJzYi0Nw8AxZf10yK+5tQ6pPUa3dH6lXt5oCFF1LyfuB -qBYh7hyIsfkb+cZoQ57t +MIIDTQIBAAKCAQEAj3k12bmq6b+r7Yh6z0lRtvMuxZ47rzcY6OrElh8+/TYG50NR +qcQYMzm4CefCrhxTm6dHW4XQEa24tHmHdUmEaVysDo8UszYIKKIv+icRCj1iqZNF +NAmg/mlsRlj4S90ggZw3CaAQV7GVrc0AIz26VIS2KR+dZI74g0SGd5ec7AS0NKas +LnXpmF3iPbApL8ERjJ/6nYGB5zONt5K3MNe540lZL2gJmHIVORXqPWuLRlPGM0WP +gDsypMLg8nKQJW5OP4o7CDihxFDk4YwaKaN9316hQ95LZv8EkD7VzxYj4VjUh8YI +6X8hHNgdyiPLbjgHZfgi40K+SEwFdjk5YBzWZwIdALr2lqaFePff3uf6Z8l3x4Xv +MrIzuuWAwLzVaV0CggEAFqZcWCBIUHBOdQKjl1cEDTTaOjR4wVTU5KXALSQu4E+W +5h5L0JBKvayPN+6x4J8xgtI8kEPLZC+IAEFg7fnKCbMgdqecMqYn8kc+kYebosTn +RL0ggVRMtVuALDaNH6g+1InpTg+gaI4yQopceMR4xo0FJ7ccmjq7CwvhLERoljnn +08502xAaZaorh/ZMaCbbPscvS1WZg0u07bAvfJDppJbTpV1TW+v8RdT2GfY/Pe27 +hzklwvIk4HcxKW2oh+weR0j4fvtf3rdUhDFrIjLe5VPdrwIRKw0fAtowlzIk/ieu +2oudSyki2bqL457Z4QOmPFKBC8aIt+LtQxbh7xfb3gKCAQBVB6bce7VXrIhB9hEE +jRlAUTm/Zezsl1CfaCjr+lejlxFybg5pkNQCvPsgpELnWXWz/8TXkbzAxSA3yGB0 +LSTp7gfucdFleJrGGZ94RTaIZFslDvk5HtFaZvjvUavyY3wCbMu+T1QUtfpQMQpP +qikplvg/2mzYhh3cMpdhFqj6EQcC12gHPPA7qC2jXnvsW1qqx0wtIxbBJvCqFqmA +gnOj/FoxqpTmMsMDG+8cwkOQ3PZTv1JbqVeJGFMvfsqb05SfZlO8XzXOvTm7Wexc +IXHTUTsXb36rH4tNpBqxCc+l1LOd3vXXPtxxBXsGDV2UeDOLWnMKp+FXj77vh0bc +W3aeAhw3xacY9KJHUobKmnlsyfgPhURZXWxg0U9oSzOr -----END DSA PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_encrypted.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_encrypted.pem new file mode 100644 index 0000000000000..bd97ea336952d --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_encrypted.pem @@ -0,0 +1,18 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIC1TBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQZyhaVfFi46pW+9xj +VTztDAICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEEDmoN7JMaRafBZpK +ARWFoW4EggJwiqa8cBsGLJ/o3Q/54SR2CCAJ+UJbtylFwZ+GgvnKrzuqd2vSVSfm +mG/xC1h5hE6miYuZXpMZuNlCAeZi0odBVXzIMMkTXCC5ifufor4bb5EeMwQLder2 +NK4IW9QkOu8IzO/ohuT+xJwiWxnyItX3bh68GFDHJH+z0+WHILHNihoUBg+HZJZc +RDGSU9GATjcX4WMnDJUnaRVJ71umBZ35RJliKKm6oJYgEmbQpytd03paMttvYUD7 +zaRAZFBXXudNVV3GM/+KlJX4huyjKbaJOv99piUwrPr9WK9OqYd//tdU+TjJKZ4/ +8yMEmTfoEUJFtQTuJ7bp74EgrqtN2FtP4v7ZQ32Js/fL0TlS9SuxHY6XnyQAZm8A +C8rSql5nQD2RBfY+OZ8k2ixVUx2kNFBZS3GZds6aRX4AG5dFBajQOJ4sAQVHGLL5 +qB7xNblgL0tepApTQ9teD/O53fSMkbxEROxipG0ukiL2hMq4s1sMZzIHEq5U+wWs +HyqNIRBbrYv2zgE5TT6o+yszddtZcH5spRT22bmNGNrREWP9KMiCuOrfgcBEeLwa +KrWS+0cA0nzmGVxaw6tf5SzQHOy2t/+L93oDSBs/9uq89PpcMrYtW37EE0z9vcNO +PqDFfJOKNtaN45s2cG/iCpMOF5EaII86gCODgl+sUaITtNrOy5OROnsuJJV5Qlom +kRf2p6b1EF41UcF94dvYnMJrumeWfNltMhhDOHF0qiuL+iCwoMPD2M6VyWCypGLc +BNXNA6/pmw+o08blonOJ8grXQ3LWj6LmZdWkPorpwiepmzmKH9wIowSC1j+AddrG +a59Z6s9wFrIl +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_plain.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_plain.pem index fc5f17ce89897..d9f6e6108d227 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_plain.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/dsa_key_pkcs8_plain.pem @@ -1,9 +1,15 @@ -----BEGIN PRIVATE KEY----- -MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS -PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl -pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith -1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L 
-vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3 -zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo -g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUXUvJ+4GoFiHuHIix+Rv5xmhDnu0= +MIICXAIBADCCAjUGByqGSM44BAEwggIoAoIBAQCPeTXZuarpv6vtiHrPSVG28y7F +njuvNxjo6sSWHz79NgbnQ1GpxBgzObgJ58KuHFObp0dbhdARrbi0eYd1SYRpXKwO +jxSzNggooi/6JxEKPWKpk0U0CaD+aWxGWPhL3SCBnDcJoBBXsZWtzQAjPbpUhLYp +H51kjviDRIZ3l5zsBLQ0pqwudemYXeI9sCkvwRGMn/qdgYHnM423krcw17njSVkv +aAmYchU5Feo9a4tGU8YzRY+AOzKkwuDycpAlbk4/ijsIOKHEUOThjBopo33fXqFD +3ktm/wSQPtXPFiPhWNSHxgjpfyEc2B3KI8tuOAdl+CLjQr5ITAV2OTlgHNZnAh0A +uvaWpoV499/e5/pnyXfHhe8ysjO65YDAvNVpXQKCAQAWplxYIEhQcE51AqOXVwQN +NNo6NHjBVNTkpcAtJC7gT5bmHkvQkEq9rI837rHgnzGC0jyQQ8tkL4gAQWDt+coJ +syB2p5wypifyRz6Rh5uixOdEvSCBVEy1W4AsNo0fqD7UielOD6BojjJCilx4xHjG +jQUntxyaOrsLC+EsRGiWOefTznTbEBplqiuH9kxoJts+xy9LVZmDS7TtsC98kOmk +ltOlXVNb6/xF1PYZ9j897buHOSXC8iTgdzEpbaiH7B5HSPh++1/et1SEMWsiMt7l +U92vAhErDR8C2jCXMiT+J67ai51LKSLZuovjntnhA6Y8UoELxoi34u1DFuHvF9ve +BB4CHDfFpxj0okdShsqaeWzJ+A+FRFldbGDRT2hLM6s= -----END PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_encrypted.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_encrypted.pem index 69dfde4b3c502..374467e05e280 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_encrypted.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_encrypted.pem @@ -1,7 +1,8 @@ -----BEGIN EC PRIVATE KEY----- Proc-Type: 4,ENCRYPTED -DEK-Info: AES-128-CBC,692E4272CB077E56A0D4772B323EFB14 +DEK-Info: DES-EDE3-CBC,0E2911A50F45B630 -BXvDiK0ulUFKw1fDq5TMVb9gAXCeWCGUGOg/+A65aaxd1zU+aR2dxhCGXjsiLzRn -YFSZR2J/L7YP1qvWC7f0NQ== +msSD9vAzUme59T7C1AL9XVLlcjnEEsA5v5fKvIr39GyJ0WeWTz7OaygM67xlkjGr +zBCabxgE4qL4Ydra8kEUZAbIYmdXs0kHBFlu2UFv8yltVfoWa8FR3VPEBrpq99L2 +NTuiWUEo9wvfLj7h4DiD5o3ejbMyomx8+V4uzWpCHbk= -----END EC PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain.pem index e1d0a6a8319c0..e8009c1d2d520 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain.pem @@ -1,4 +1,5 @@ -----BEGIN EC PRIVATE KEY----- -MDECAQEEILEXCgqp9wZqKVmG6HTESPeCyx2O4TDoFqyILz7OGocEoAoGCCqGSM49 -AwEH +MHcCAQEEILEXCgqp9wZqKVmG6HTESPeCyx2O4TDoFqyILz7OGocEoAoGCCqGSM49 +AwEHoUQDQgAE7mUZVxp/0TnDu8hSSedG9tGL4Fd1PhaUcdJ8f8ooFo+sYhDCp1m2 +1JzNJihfHNxhxpOYPDlz52yvero+raTAeQ== -----END EC PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain_with_params.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain_with_params.pem index 2ad57473236b3..c5bed51ef1f86 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain_with_params.pem +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_openssl_plain_with_params.pem @@ -1,7 +1,8 @@ -----BEGIN EC PARAMETERS----- -Notvalidbutnotparsed +BggqhkjOPQMBBw== -----END EC PARAMETERS----- -----BEGIN EC PRIVATE KEY----- -MDECAQEEILEXCgqp9wZqKVmG6HTESPeCyx2O4TDoFqyILz7OGocEoAoGCCqGSM49 -AwEH +MHcCAQEEILEXCgqp9wZqKVmG6HTESPeCyx2O4TDoFqyILz7OGocEoAoGCCqGSM49 +AwEHoUQDQgAE7mUZVxp/0TnDu8hSSedG9tGL4Fd1PhaUcdJ8f8ooFo+sYhDCp1m2 +1JzNJihfHNxhxpOYPDlz52yvero+raTAeQ== -----END EC PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_pkcs8_encrypted.pem 
b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_pkcs8_encrypted.pem new file mode 100644 index 0000000000000..bfef68d57a722 --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/ec_key_pkcs8_encrypted.pem @@ -0,0 +1,6 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIGzMF8GCSqGSIb3DQEFDTBSMDEGCSqGSIb3DQEFDDAkBBBJgd9ei6iSF+3O6nhk +A/CTAgIIADAMBggqhkiG9w0CCQUAMB0GCWCGSAFlAwQBKgQQezbgAPm2wh2vFE6l +bGKePwRQZub5Evev8F/53CGRXhF0sdL+i/2zCJcmqrauwPr6VtgQdXmBlJcur3ft +4PDXCe1R+3jhk56gmOBDjnOepPnWge62lKO/nfff6lpgr/uXUe0= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/key_DSA_enc_pbkdf2.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/key_DSA_enc_pbkdf2.pem new file mode 100644 index 0000000000000..bb1655d2e9548 --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/key_DSA_enc_pbkdf2.pem @@ -0,0 +1,18 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIC1TBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQ9WLcmXfK4mQgb8z0 +VEFgnAICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEEGUh9m77oFyis8j5 +VedmDqIEggJwymDZJmHaNgIiJAI/psd+hR4n03oMwUaV72DmQewEdMhI2sEy36WU +Pup7X8VmRLb4tyiSiEUlh8FIX3cMpQ11e1j/lwW7wF+W3Qb6CHcMu8FCz3LN/CS4 +M+sQttfXiHh70qZvRx0SNaJo8A+e8HRGmYrbz6VqdlslSdB4fDT8Igls45rDZbch +LJlHQfy9XQSgCFR6J+6/6Q8GyW07+WnkuYnbixN8ZdZ4jPE5mrZYMMQrQY0l4ThG +vpb7U6VnWepDnXgeNWZTjHVLSAx3bbLUpbwotJnZISyTlRCxFSnunrRIkgaWPNMr +qE78FfE8I8Y/3Ft3AURgM+o/AvgyNCNM9g6DCqjaYpuaK0aJpdvaez9BiiANosBq +Powto+vuaDyYVIEhZ+GbokkvXx9muzvyA3KpqN1dg18au7Mqpkrenrw7Z5J8TnS2 +Pv686vSxCmisInC7c7uQYVxhze7fYMDUsyvWNPNUUrYnqrVtZtjD+VjkuZHJrBnL +haz5xQ0cw7pPY9r8R1y5jxMCVKxMBvbOsQJ+MBqGXseYmeB8qBBMYVdC+bNdEzga +rWD6FCX/k0PH2nP6KaU3qWLh3ueEtwTh0KO4yXgKyiLzF1KXoF93+4i9hX2w+t/W +Y5jgNErriqrW5WOQFDrSlVmMx1dLNFzM1cB7TKygZrzytULAYAg/0el8Gjbw7nKP +HInVUFKWhpNipEhDCGnGKoBvSz88AYAHS2I4fnFg3AfZCWEkkKJg++Y4Wip4+KTC +XjECqMqv6hwNbvMf3JkmqTPZVh8MtLIAiR1rUIWdZMq18+4vnHtW0FXzLb2nYn3u +ZrtXtOGxpBUY +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/key_EC_enc_pbkdf2.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/key_EC_enc_pbkdf2.pem new file mode 100644 index 0000000000000..b851058d17217 --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/key_EC_enc_pbkdf2.pem @@ -0,0 +1,6 @@ +-----BEGIN PRIVATE KEY----- +MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDA0+sj4ekT4h5OgmLaj +idCmLthqOUDdUNf67bBLjRSapUedsBIqSCx2u5E9ca2uGXKhZANiAAS6mhP+8zyk +CYIaOgF35O1KeRxrPsvWfm8tb5+KjuepPI+WR33xiBQcnYfeNrYMgP000Ifk8gfS +mv5aCHa5dBdgTzixsupMng0R8/jLPtS73Fzhi6G+KlRIe58c0xcVB5o= +-----END PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/key_PKCS8_enc_pbkdf2.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/key_PKCS8_enc_pbkdf2.pem new file mode 100644 index 0000000000000..445d50f1cafe2 --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/key_PKCS8_enc_pbkdf2.pem @@ -0,0 +1,30 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFNTBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQ86A6gbRa4DZIX+cz +TSf/DAICCAAwDAYIKoZIhvcNAgsFADAdBglghkgBZQMEASoEEABJ5byRdWBpd1Ho +U/5ukYAEggTQoJkyyzwsns3QvYy4hIuwge7G867QPSCnHXhKInOYNDgbTnf36ia/ +eO5PELfEW0sW6ZZt/D9h28vssT0RI4PTyCQCv3DaVym6f9JbmnfvJePlaWkheieN +j2Y1gth4fEFWKQK6Px3hkZCCjc1LGrSSKoqy3YhWlxbjrj0UfCpF60MY0TLcegZ1 +Zdl4HVjROcDpSBC/OyWb9LXtyUM5NJVEjHqr138iP/S/qtkn7kovJEVqUSIZd2T9 +BQwzCDzZD8Rl3W/ivZnCn/3lHkDl2JgQ9gVXrk1QhtKy0XF8z1lrKbYPkCL4nXR7 +2qOScFSvF/JjbmhxlnfjyrpCv4ckcvT/+KFvbNQP1p8/OFfIsapG6wTz2XGcwgA/ 
+c4uxrnB/110KO2m1zexsasxRTfvyHaTIPHl6NNh565cjieqdvp5KbzZBs9eJA19e +NTeLVbXYZA5Ols0FF9cG5eeU7NPVFVMS7UILHnq9v+i1eKO1VPUWmCZhR8Sje0M2 +DpzSnQmrErVaH/lbZ9ZOklFhpL+UvW+g8IBSLLdCo+MlyOr/Ydr0HiADBb5zSiUo +iWOrzgA9lLDG7VSHrpTU0I+PE+QctLVTPX2f+S+/pErnQ7Y+DE+OOsM37jGt8Zsi +r+XcxxTZUmiakr6fUDEVG0NxbErTRgpHdSoT3RFgcs37MlrC88JbOs1cOiwma7/e +56gqx/3uHJWyPKjVC/RUfIqsSpTx1EjqHeGYnJ9DTW+Yft+d7/HEZOr0Nl+3Qmoa +b6Bxw+5c6Of7HYhEKoi37l7O1//bmrs0pURPWPmawPZtlfwd03ifFTZDOvn8cKEL +TUFHBYd3V8kNmqRI/oUq28gk7uFd0Wby4epXcSVgSX2hAloSMYGfzcUU5u38v18J +JxYgg3DyJAMH3V/GHV98XU0zscbaTKreKMUaXduDS3ktk9maq6Mne/fpI/ZZP7pr +C7c1RJWKbSSdwAchQCMcIHUSZjA0iI7dIde9VP8e1DlErdWch3i7wdJQV4YqMM8v +3sR3fV31vkZcSUDRCcBJPlNd/j6+AaIU0zVt3yWUUSCExSAOCybrlu0JPCSjjyOu +Kkp0xEa6xt240QA+PyeUl7aov1wKZ1P95aek0y1AJy9SmcBUwBBVaeG+ETO/C5gv +g6VqjG18BX6ulzJsOsLnQCxvbQajB/eF7dvex2OzU+jPUPuvZ1IRu4SHw88eyGz6 +r8RzQ1d7sCr+kV6pWXrEaNnwyFhOhwdNMxaYwSUItrfb3+4jPDoHa/di0sJ4Dkr/ +UVuqnc7TAdW9x+PTtUWMQfaX7S8o6XDNXkhcWznhNP7OmkQpT2K5kkaGfLeHKRbz +7NHCwRXEm49ZPfDCnI9kddnejU60vDHW1uBGH2S5kn71noAe7R07s9qeKW50eLOf +Pe9BlOPb205gnibRYjjj0pUZ1YJwD4rkiXaX/fXHkPpgpyUEbw3tAZW+FqXUZSaW +TwAj41oXms0VoaUi/TcvsIDjnldVvZ0MkHUwMtfOvHb/lbrafHKoTHIuBbRAHSNf +uQXvBDwiq2uv3v0EZdz4mouqcp8aNZmunVHu7c22HCaf4s608BIq4FBqrq6XRdqo +cAOcq+WGk+F/helMKaRWo737062tq3dlhtRpGLXbZUcYThUNY4SjR6Q= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/testnode.jks b/libs/ssl-config/src/test/resources/certs/pem-utils/testnode.jks index ebe6146124e8fd607e46a1a3129bdf9b4de0370d..1502bbad4eaedce7194d7e28acfe87e6b86c29b4 100644 GIT binary patch delta 6649 zcma)=byQW`zQ_08bax1wlI~4+gLH#*w@R0yvS5=+OG$T2mvq;rq@_VXq@-kvDDXJ< z-gn>e-1EkJf6TGQTyu`O#+Y+{zu(Vq#j7U!wvX){`9UCX!ef5n6%YvZ1_npHg5jcLbE1P$!5k1G zFc=k%`o+k1p~IH=y`U+C1@oHVIs`BL8dvUBQ#{29SfjIVOF-m}{oLZq#gAX05?}ho zL7WG5&IGCZ8)<7}CpY4wQhd7ciSl0>1_3+5U!j~BIyVwR33OerIN7pArL`S_1xmSp$JB{qOJ+5pdn zf|fAPs)1FjpXvavg}RQSKb`%nue~cIMP?6)I3b>S5~du|t%E5U&;nPGQ`!kX&>B`~ z_Bb6^VtzCw0}{hg1ZUvh?GR5Po1_zrwPy7-jsEE$d5_aEXCKFn|u*}72zHlA<4lr1^*{H$QGGNM+B$Xwt*E7%NosdQP zpf?NYT0(%vW!q}F;=D>OP*s8nLMNeAssK@_`Usi08mTi5lr2Al)2lnqF1EbGZXhmn zK!h?~h_GE5r?Z+v~+)*0~m7h)&a+5Zjl14+UIZNWhv`hmBqD$;>lYIS<=yh9o zv4Y*Mk%Egk>J;Uf^x-3eLKJqG^!BB-h3-%9t#hkO*E*AE3*sZBiT3=Ok9Hq2vc68p z3NmMJuDt$!|7PP9mEoeuN`VeZC(pKvM@OXjmXNIj`fwL~eAbs)RqXc&E~v|E?{FcL zl$8pW%8^TkTHQPV;3iLAw()sYQ|r~bBSRJDhs4p7Srb@g$B;}qMC{8R?$m>N78k06 z>FxeOtc(vRn;=|i@HkKQSM~2(>)S)*zCtb)ELte#;7QAM*1phVG?5UhJ#RetchN#D zdReflhr<@b`nSsb-B*LBY-|mt(3PGMdgm^+{dn<^@$Fn-W9nN`F+n-aB25 z!ptWkR6be)D68H+Ps{e2d;tSZ4IG(L3|8GCsxZcQwedLSl>VwC$Jh2|9%j%CM7tbe zF4>c=fjCggGmO=^j{7S8X5T%?kkT(JB9u4^rf7yV5}vi^vlZ~Y#A4%6=1QqXCjm+x zBX$j~J|eh)#t@YO;TWka^5!_COTUDPaSDm?CIa_+@p`lwlJY``zWm&P)z zxpuV%pkrxz7gFmD6lTBj`=(6>&CANzvol|`4s2lo$xNio))85QIH_ecZyzAD;{-Gp zdVDbYDDXr49-8JC`%XX6i#iIiM0~q*PcR70m`4aE1_Gfp!{8|OFgPS57X<=_KuGCT zJlwkVCSolqbq8W}>4co*FUeqd=vW-#D&gPpz+fCS5R8tG21bpJVu*@@ORDYR;HK|j zBjaV~OmA>!!Sq^p9t@+Uz~dK$iSmi@3GwmsiI`I03H;F&`B(QpS4eE8gaOSuEvJ{$ zC+-l;x5O7&(7+qS`r5d?kQokRcIXFRZNCLv<<+b0z17p5Z^)`vwaxzg%&SaFYO%ON z3IB$-$c4&zJJ&f^#Ra1*`6F)yO&Ju~wO7UDEwX&- zK)*{gQ*I#ItSZGCDeuCo&fH_pJ-FcI&hLWQq8vK>^|9CJ%EbTpr`*r}beTuzL4FlI zzbbcix%i)bj{bV=9{qN)pkS>oRa5xMDrWVnlEnK_h4Qp_)I?9ZsWVnB&A&rXz#woz z@Ml=?7L4X@hiULp$x%sT^mj%oIN*_aOB#!SC}(fu*Zh)M|~5BmEF1b<8* z0fTQ*(O@Wdk3NAm*ISF_x{96mef5d% zb7ItwZ5#r}55{`GKd5?5ke}seEE~bWT$sG#N)KdbQs<~eSOE1@Q5Xj!<3G=mQn|E^ zP+WT@6icd~yA-_Kqi6q>Fw@3$6v5@RN!gX6if=~sgUtg%eLtyvcGe*RRVvxF=yC`TgnEsUmI^htS~ z1RC%pSqHUSl7Ocz|G56d|E2{8yWdcOPx5a30v3Gs7g7k`B(3QhL~0d=Mr1S(d{N}@ 
zo0VOWtvt5Urf*}-NN|*+<}F=R#w>?-hG^g0c{16bsJR`LWKi{pO?5wo76uS)lD+Oy zF9K#IJx=aJ>pxz7NHd&e8c8Ah5Zg|qA5$E1ERj@IJD;|5CUNyF?^Cy>r0Slmm%sd+9v&d-sJVGh*mASmS!XPA&eMS=XM zs2yzn>I%T0Z9(XFTM#n+zgO^s5u#)K{1CByhB2LfV~Xk5;zMDr zEm~W*uaS*MbbdK1J=?Zoz8RI*$zh=KJO?^ML57k3ACgRfHsUTy#0mw!V8*(pRVb#P zI;Z|kl7Bma;BPdU2>fXeQJg?9dkBojRVf`{L#aAH`4NU^8Q=qOt~)kch}yQdV&_;< z>(muaGVec-HI?sP)m5>7ln#f$FwCl*NyZ@gZalo^bJ!Zg$*EZZ^>~%u792)u)r8|9 z5gamg3*M_hJg<=wZ}LLae;hULKU4tx0nPT_Hg~a*Op1b6eFxatcSoQ87X`o}k;!&- zAsmj8qA;-7PU*FJwpxy;(S6)>F(94+^zFc&F>`M5aU*j9QE3(?(W~WwDOQbmfb~>#joEs|cknkRdKL zZe68-NBpOvw^F*dOQBY~d>b!sQ4O^s&Wx`6c$N=_jL7pC1hc=!*>Fj(NM;wX~uALUwso zD)3KL&d{zFHanis4=<}%lAL_zE7nK>t=bL&&VdhOz)u0!CAV8dm&_=6EQ5+}4E~!} zff*eisYCQ>L1hFX=M|e$TC#ZH$OTu`+4XqAaU=!%`V&E1YYArI_ffW%_t;!`x#JZ=WpxUYDLe488s=|X&^-RJFEn!qlp0fs(GRbD>CU%(U*u)}B4qDU?ZY&2{iI6N z=B9EG>d52D6N0tO*|aYY>E-4Gq7$@-*eD%>4ch%kWT?_oDSz&=P*sI1i+SsXK zIOtf`;VNJUl;16i!%bDdylo-TLsn6C{Z>G`#@CrzTsJfuWG1^`8)hL(t2S`wtU_zRw8UE z+p`S@yA)A&TMtshNkU4T8n* zczpNXT6~hKVdw23iq5&p3I>9A1!9`tG{Y8Jwp2}cow43}k%^J*OFfQypg$;}6c;4H zU)Z~G<`vKErX}-Q0#h`vUuSzWnu@*P<+^2_!kW6{Eo!{<+$1^UrtxgVAm$#x0R2dV zU>tQ5#c;U1za`(PqJicoCT0ztcY@G>T9GNmVNbuWukBtrjrb3R9RvJz08q#B{(2`-5XKJ77>-7f0pX5!hI7 zKKQg`yuzqGZzM?0`sfD&tDfGkB$L0G^-NhhAtEEce^;@CKG;B`(JY8dIEJoNdiK?x z7D29aEWi9~^QZk!2Z0zg)@EqBcpDDRCYPdfirDVVgwHY|l|wma1X9D9Wm$TNSKTlz zM7`mthWADMUSY~ik%hk5!7rs*RF*pX{l(U(t?b236rfmAFBoSqviZ}I0{P}K z#qt^o9E=9~V=D|XidUXD8oi5@LlU9J+>_N6>vA|eOx4M8W;?htDo7>ra>I9tpM|vl zq%V};HSw*#?O#Y}?;fBR8MgM}X4?CT@zrz6;c5Edy_}~?!jbl7%=kaVXs1jaFOPLA z&g@=5o{V;C0|umxS2}#y+U4x*DVH)!PdCw=DcL!mx~_A11$AoaWS%*u>jQR?#sA~I%@avb&B zt%N2z#M(>_%^5srbR{U<11spFGz+O}u_lXV>$BD_WJm?+DGm$ih_lobxHc|V}I!BQM!KCl* z8k{l|Ylc@xv`9u`KO3oX;_Jj-PA9Hkmt5FUcsHCk7)0`KQoM`E-%yd0^nXuE`=2#3@0}<*gU9T3)?06`EAd~1>0Q1^ zDiYl6uJX2{jP?#aWL91spH62}8#oe%H?rjsNB1m2{Nx6*Nr5o;E54h4DAO!35kdI+ z{o-QqOwv1quSJdZXB9YkuC%Sb>Vd)iTjz_|>~ki#rU0|^L&;dZ%O0@|h3E0zoEpJ% zEW69b9=Oq+d?hubgcwg*-KeezBvt0Dyo1%ri{KknFjAk`XY6AZewOF7yE38H$x|D; zEh!1n&k5g+n2dF!TOv{s0e1KRsYAS+u=h|%uCB7s)w80n_iIKH2-ok2x0Mkmir2;` zO?q`S?ut@xaq$;SY;O;7pE_sz$3Pr@{1_yS6UXFJi$#RQ;K08%2vLaFrVF)n{)}kn z^`#6-ZLG48?Y~LGe|~`Lr!x15I4~7|{49riMune&nhe>L#h;Iby|vlMCzEGS=TdIj<~7^Mm(f8F$ak zPe>1LIWzs*PT3J18B(elMor#pceUomsffdB%0?sRZNF$>%QKv@DG1R_9GLqltAhi3Je_ty?k7Si*KL% z?aPt@Unv4K;C}4ULN-8J_`#cDBuJ2Bhp)qkHDV#M*755xfB?T_h6BUAEbcJ2mWui_ zosALD=x<4hMhJtW;oc=>>%XwxK=~J4%u-$*>gwDERg2yVZ?Njne}^XU*VO!Xz5iFR z|1;MM{?7G+{|~N*qbU4cWZvm-!LljO%%SHKd|z?!=H#arsS^2*08Luxgp5;GyLXh$ z%#WAI?trj$0;(xB5%v6;NS`)d=tQf`lJq2~&R~~UHh+t|`}61&R?L@P8+Lqwi1QBo z$33LWji2XmUM2Rer#d9PIrZ=6O#Ik+G4P`;`cUV>bF#LUi&^>54vfPq1uW^x@5rKmIp%PHBn!hd1E7VZ+gUaqS}r_ z%F+tt@ym%ZW~K&Y$DNg~arjWZSYN8QwBx*LNJpU2$OqmUcvyz?RO-Z4hRFm^3L0uH zYs#;!>x#bvlA}y;Z6WC#35qmp^;nmhdVP--Z4x8IO2*o}q1`r+ukaxamjO-VO{d|-&O(mu z0oIQkW~}rR2IV=}Z9Qe=Y(>DJcK7OH`aPbGm6?1q(}&z8K!nK$J1eqjR;HliuVs`? zarIYl2i-gyVIP*yY_!&n;xkmReeoHU7p95D*ktq0v@y&!yBZ^8V+~iMeNe-LApYUH z*j|o7lxsfk3j1IJ@zAGIQ>V35vZdDI#Lz@bwc?c6&%uGOrAUQD-}mDf{%%hQ+voQ>Mwad( z6aA0K1cUN8<}HTjbtm75b3gAypv#-sj6~~0AH2^MVy0Ciulyn&kZg!t(=(`E8y#O( zUZbZXhZ#M4+e*J8YKX4Wr`ku2McvMoQl? 
zwSI9L=y)MDK|`fYL1@}Ug?n6A?C;FW!hNsW9i3Hnv}o^J=4$J&>&?h2;Iln4VLUCU zzp-vM+xb6=RqbebrswtVjq}8*$PtUIm{$|!RKFFge>>r>R>k}452QrR-(pq<1e1h7 zRYK^GHf%EplpqHv0W~v}OvBla_Kw@P5cgc=Ay92Limw#sING6S@o$%t?839J$&kI9 uW?!D)Z&@n`nmx`eii4MAPi#ACSD%(LH@9Q=Go9lr{zS- z>dHVD%O9s)t$1Qw%95&Az+N>Qq(BCYHrQ6()@O{OzyDXx{fK^l;7x1RCm`d=KZ(*k zc@DIPdP;}`pKii_p%#6XfSR)vv~qqoDbA%qb8Hx% zJo8K0k4?N4%L#+(D;dc@ zjJgR_HFI{1@b&M1vaAh<&Wt6S<60YhSfJIG!s%80mPl69(gQaq3c|_CS{A$yArZMG zPoY0Uf&u!)b|w9oLAH#0sN_OD*s2~YO#7|}@QF2qG3K>aa(wItBv}1B`8C{62TvrN zKcV%Q&alXYEQ^K~0{6D_5ipi~CygeSz; zJCLUcCzoalmhZ34$A$$^UPId7ZnA88yLCE0iU0rr0RRP9E;TSY0009#FoFX(FoFW~ zpaTK{0s;ggJacL=4F(BdhDZTr0|WyC1pt#A0VR?B3llUmF*sTm4Ky_|FfcMOIW#jd zIFlOzCVzqfw=jYMECvTEhDgph1OYID0UrW^fdKt~Q-K|I5-C#ON}Mk2&TO3(T^Txq_nWgbVOk9Q6W$fdKd6poP|(J>JW_t6Vm?SGjqVue z>Vo-KOba~+f@e`#SB|>WSx@G12M~dPv`H3mBj`*h78AG62{Mexrr|smdW%$ijVR!w zu74gYv!->CZliw-`XM!9@%v(W0VF%jr18F}QILtKgWus=<(~$Ui)LPcdsQW50Zcpo z&q*o+frJ18fq-{RZn;Gg0ecr|58bEK;RQ8wby!A|r9g3J_|togT}+oM$-*E6lbfd1 zSg6g!?L@Hs(w8TB3kK>T6#*-gW6jhL{$dfvv_c!I)(T@)StGRl)VVzTH$SJ~lBQV-e5ldhPAzVKh9c HG#>GrQ0C_2 diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index a44e1004d93ad..00931848d0644 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -87,11 +87,6 @@ thirdPartyAudit.ignoreMissingClasses( 'org.apache.log.Logger', ) -forbiddenPatterns { - // PKCS#12 file are not UTF-8 - exclude '**/*.p12' -} - tasks.named("bundlePlugin").configure { dependsOn("copyParentJoinMetadata") dependsOn("copyTransportNetty4Metadata") diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java index 1123ae4623300..d0b0403874c7a 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java @@ -84,6 +84,7 @@ @SuppressForbidden(reason = "use http server") public class ReindexRestClientSslTests extends OpenSearchTestCase { + private static final String STRONG_PRIVATE_SECRET = "6!6428DQXwPpi7@$ggeg/="; private static HttpsServer server; private static Consumer handler = ignore -> {}; @@ -115,11 +116,10 @@ public static void shutdownHttpServer() { private static SSLContext buildServerSslContext() throws Exception { final SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); - final char[] password = "http-password".toCharArray(); final Path cert = PathUtils.get(ReindexRestClientSslTests.class.getResource("http/http.crt").toURI()); final Path key = PathUtils.get(ReindexRestClientSslTests.class.getResource("http/http.key").toURI()); - final X509ExtendedKeyManager keyManager = new PemKeyConfig(cert, key, password).createKeyManager(); + final X509ExtendedKeyManager keyManager = new PemKeyConfig(cert, key, STRONG_PRIVATE_SECRET.toCharArray()).createKeyManager(); final Path ca = PathUtils.get(ReindexRestClientSslTests.class.getResource("ca.pem").toURI()); final X509ExtendedTrustManager trustManager = new PemTrustConfig(Collections.singletonList(ca)).createTrustManager(); @@ -184,7 +184,7 @@ public void testClientPassesClientCertificate() throws IOException { .putList("reindex.ssl.certificate_authorities", ca.toString()) .put("reindex.ssl.certificate", cert) .put("reindex.ssl.key", key) - .put("reindex.ssl.key_passphrase", "client-password") + .put("reindex.ssl.key_passphrase", STRONG_PRIVATE_SECRET) .put("reindex.ssl.supported_protocols", 
"TLSv1.2") .build(); AtomicReference clientCertificates = new AtomicReference<>(); @@ -206,8 +206,8 @@ public void testClientPassesClientCertificate() throws IOException { assertThat(certs, Matchers.arrayWithSize(1)); assertThat(certs[0], Matchers.instanceOf(X509Certificate.class)); final X509Certificate clientCert = (X509Certificate) certs[0]; - assertThat(clientCert.getSubjectDN().getName(), Matchers.is("CN=client")); - assertThat(clientCert.getIssuerDN().getName(), Matchers.is("CN=Elastic Certificate Tool Autogenerated CA")); + assertThat(clientCert.getSubjectDN().getName(), Matchers.is("CN=localhost, OU=UNIT, O=ORG, L=TORONTO, ST=ONTARIO, C=CA")); + assertThat(clientCert.getIssuerDN().getName(), Matchers.is("CN=OpenSearch Test Node")); } } diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.md b/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.md new file mode 100644 index 0000000000000..f2ff25d41a890 --- /dev/null +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.md @@ -0,0 +1,48 @@ +# generate self-signed CA key + cert +```bash +export KEY_PW='6!6428DQXwPpi7@$ggeg/=' +openssl genpkey -algorithm RSA -out ca.key -aes256 -pass pass:"$KEY_PW" +openssl req -x509 -key ca.key -sha256 -days 3650 -subj "/CN=OpenSearch Test Node" -passin pass:"$KEY_PW" \ + -addext "subjectAltName=DNS:localhost,DNS:localhost.localdomain,DNS:localhost4,DNS:localhost4.localdomain4,DNS:localhost6,DNS:localhost6.localdomain6,IP:127.0.0.1,IP:0:0:0:0:0:0:0:1" \ + -out ca.pem +``` +# generate client key + cert +```bash +export NAME='client' +openssl genpkey -algorithm RSA -out "$NAME".key -aes256 -pass pass:"$KEY_PW" +openssl req -new \ + -key "$NAME".key \ + -subj "/C=CA/ST=ONTARIO/L=TORONTO/O=ORG/OU=UNIT/CN=localhost" \ + -out "$NAME".csr \ + -passin pass:"$KEY_PW" +openssl x509 -req \ + -in "$NAME".csr \ + -CA ../ca.pem \ + -CAkey ../ca.key \ + -CAcreateserial \ + -out "$NAME".crt \ + -days 3650 \ + -sha256 \ + -passin pass:"$KEY_PW" +rm "$NAME".csr +``` +# repeat the same for server key + cert +```bash +export NAME='http' +openssl genpkey -algorithm RSA -out "$NAME".key -aes256 -pass pass:"$KEY_PW" +openssl req -new \ + -key "$NAME".key \ + -subj "/C=CA/ST=ONTARIO/L=TORONTO/O=ORG/OU=UNIT/CN=localhost" \ + -out "$NAME".csr \ + -passin pass:"$KEY_PW" +openssl x509 -req \ + -in "$NAME".csr \ + -CA ../ca.pem \ + -CAkey ../ca.key \ + -CAcreateserial \ + -out "$NAME".crt \ + -days 3650 \ + -sha256 \ + -passin pass:"$KEY_PW" +rm "$NAME".csr +``` diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.txt b/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.txt deleted file mode 100644 index efd5e4c20ffd3..0000000000000 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/README.txt +++ /dev/null @@ -1,16 +0,0 @@ -# ca.p12 - - -# ca.pem - -openssl pkcs12 -info -in ./ca.p12 -nokeys -out ca.pem -passin "pass:ca-password" - -# http.p12 - -unzip http.zip -rm http.zip - -# client.p12 - -unzip client.zip -rm client.zip diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.key b/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.key new file mode 100644 index 0000000000000..a04c18c994359 --- /dev/null +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.key @@ -0,0 +1,30 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFNTBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQ8TSOq343U8BV3rEt 
+vOpSPQICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEEFXKi3C3VJzsGiCw +Lh2zY40EggTQwtBoa+e+J/UAA/mVv50rVH7oqvs5t9wRfznrldPtUgTR7r06TxNB +DXN1spBSmJjrohC3RbEO4169YqCwAk2HsptENM3MV5A9EwTuXPVBW/ic2SDOwmiP +wvRRKUujjaYZTfVeVJi0LqnCtyv7/hc33MJ3IMeNefEwmYRH3u/ktp+NBXZPEp1G +sdbPLpCxUqtq8zE84ev+RyURbErWVvjI8ma20Hn2gACkQazYTSVMVMxvj4+m0oBd +hzQ54GjRypm6Tc+CkJXGbCp+3sCONUqKARZYo+oiL5wEdGTLOcCwaCZxVkftDZ4V +oGrHVlgFrYgADaOuokjMf178ymMJX1+kTYze/k/ajXHd8qBKRD1X49dDhrHjnlhV +2sGOTKk16fBXSoM/q4vfmBKkd+BxDcdbsDkLDdT266XBy9hdRnL6e3Qk6ag6i0dB +faJwyXHIhiS87nFLpYeXY47DABBvmKVqafdHJDab7GYmLb+2J33EbmQX+tMgKrI+ +l5FjPX0Lz6/c74M6jYGHhbii3fZKGzb9BwWCEG7eIMONfv7IoaP2HI/P5G1WheQ+ +Ocd4lsb+pCmy+tzQcB7+GtWX0sG4ugCTsKIofN9ZmkvdQsvQvjT/oubDtBXUMgIL +/6GpYr7f535wD8jp4qHjSNyiNf93XiepxUsKBh0xvcGRRfhEjrZhnDm8DYP014bL +HhWzPVUgQwDJMa92wzsqFpXCujhLDb3BzLZLCGWDUkDsPjX2hUzNRWw+nN0FEwkD +ezxZOpK7m/ZfZi0rI94oYpmanwLNH5tvwr7pKLJ2SAP2WTNYRtff7vgeKOmgDG97 +pSm49phrSdM/VbwWgoPHpGxn6De5mfp+52dz5sCZMP0tsYMa947z2VDAU9f7+AQL +V73HGQKu8eny2ofOvQiKMK7sVo9dDvf6O4fGUCZh55YmQYzNq1cYh5lgQgPJ/CDb +c2mUVhwPfd4gvmKzBQ+nxjo5Jbh0vJwqOxk0SMCwWqQW5+Y9mdcDseyJwL7iyiTd +xyN9rUdro86foF85Xja+MZ0hVW/q1xwrZSiunWuvg0uaGMdSuknn7skLnKrdbfIU +RocweZPetFxzCm7XeikCaKucoNLNSPjAKW13doZSOc4OxS4hXep211dGVvK43XwX +B6xp8WtquZaGk01J789H1XU/sz6AssuCrMvql0Gd/GeFz+Ql9dMd4bH2ZzjpRcWL +FMZvsxXzqp5zodsn/j26h+WKZYmLSnxvE+WjQHyECt1JgSyYD2I84CxKj9I5ezX7 +1PIc3/OPl14p+ni/lfx6UM5WmbrHcuLM5a2ml/9e+HQci2xDNflkCiRQ1jcXYSB4 +p5mAaxSPbC33mi7jvBtUF1Yk9CiIRW941pKhn5YSj4bEMs6h8tB4M9wfXn9HPe/X +0KdYFMzf5sc9nmDZt2A1EoZexYwMk56wVQ7gnekw9ECCs6OLUmXkAmKojvbNXG0C ++t0W3LSoFsMM6vnINVooK+dQgRLqXFe57HY8j7zTmFh69Kh3/Cv24gQ21xwPYB6y +A9AVrrxRUV4Nlqkw5A4kVKXRry9/xj5DGgZ4SI2rJZ3vhfD2jiLFnl+JBT/Cw2xL +NL32subXNGqY4ymnq1HSG3SO/Jgh21XZL8rl2kZ+QiT7QvRVFWefRdA= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.pem b/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.pem index ee758ca3e6370..615f00e468ae6 100644 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.pem +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/ca.pem @@ -1,25 +1,22 @@ -Bag Attributes - friendlyName: ca - localKeyID: 54 69 6D 65 20 31 35 34 37 30 38 36 32 32 39 31 30 37 -subject=/CN=Elastic Certificate Tool Autogenerated CA -issuer=/CN=Elastic Certificate Tool Autogenerated CA -----BEGIN CERTIFICATE----- -MIIDSTCCAjGgAwIBAgIUacmv5ElKJ1cs9n61tEpy5KM3Dv0wDQYJKoZIhvcNAQEL -BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l -cmF0ZWQgQ0EwHhcNMTkwMTEwMDIxMDI5WhcNNDYwNTI3MDIxMDI5WjA0MTIwMAYD -VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC -ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ0rA35tPl0FN+BPk2YfmET9 -MvDWFLvfL2Z1aw1q1vnd12K9zumjN6veilHA2Iw/P4LG/mkQZvY4bDPgibRD7hbE -vwPoju4vr614tw60+FlkpO6HezYo2I3cni1//Gehhs5EW2P3g7Lw7UNCOAfcR2QQ -p/dtwXYWzXHY9jTevQSv2q/x5jWKZT4ltaQExzvXAcxRGqyWV6d5vol3KH/GpCSI -SQvRmRVNQGXhxi66MjCglGAM2oicd1qCUDCrljdFD/RQ1UzqIJRTXZQKOno1/Em9 -xR0Cd5KQapqttPusAO6uZblMO2Ru+XjCD6Y0o41eCDbkd0xA3/wgP3MD5n41yncC -AwEAAaNTMFEwHQYDVR0OBBYEFJTry9da5RZbbELYCaWVVFllSm8DMB8GA1UdIwQY -MBaAFJTry9da5RZbbELYCaWVVFllSm8DMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBADA6qhC35PwuL7LRddbhjjW8U/cCmG9m7AIvH6N+Mw/k76gt -tJkEDxztMHUG+A2IPyEcYm7MLr1D8xEQYsq0x4pzFcQnMSQDv4WTK35vRxMtaqwA -WZTyA+DibBknbaP1z3gNhR9A0TKx4cPagN3OYFvAi/24abf8qS6D/bcOiPDQ4oPb -DVhmhqt5zduDM+Xsf6d4nsA6sf9+4AzneaZKGAMgCXgo4mYeP7M4nMQk0L3ao9Ts -+Usr8WRxc4xHGyb09fsXWSz7ZmiJ6iXK2NvRUq46WCINLONLzNkx29WEKQpI3wh4 
-kyx6wF9lwBF06P1raFIBMeMOCkqDc+nj7A91PEA= +MIIDszCCApugAwIBAgIUOpUOL6Dz5+T+y+SIDknp8nOB2x4wDQYJKoZIhvcNAQEL +BQAwHzEdMBsGA1UEAwwUT3BlblNlYXJjaCBUZXN0IE5vZGUwHhcNMjQwODI3MTgy +MDE2WhcNMzQwODI1MTgyMDE2WjAfMR0wGwYDVQQDDBRPcGVuU2VhcmNoIFRlc3Qg +Tm9kZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK2bmzHyMB705hS2 +Vu02WaTz7iWU11aVlNwAEVWIpjarDsk1IeICYe2vtv7e9qAp5IAMC6y9Db4XAx6A +PKJHZ5XcrWKpJqanMUwMi7dJ7wLWauMlx4WdyWSdJ3KRVO0Xzdr6My6dV+LCiiYX +cQCFYzEQYX02kU8M8NZ3J9t5OK3MF8/f0gta5vMs/1akPJzTMYyLva+hcNyGC9pW +Ly0w2kWxqze00KjT8wnmUz3h6gxxRwwdocsyZ1AE635anRu2MuAo94sA8kwQdl6z +cKtTzlzbLmrBQzusnuQtJCKGzvH+uBGodFpQhi5JpYVbuSvqI1Lumg7RA524cb0t +OKnijBECAwEAAaOB5jCB4zAdBgNVHQ4EFgQU41fNVZMW0Kc5nmv53kKTINZT0CMw +HwYDVR0jBBgwFoAU41fNVZMW0Kc5nmv53kKTINZT0CMwDwYDVR0TAQH/BAUwAwEB +/zCBjwYDVR0RBIGHMIGEgglsb2NhbGhvc3SCFWxvY2FsaG9zdC5sb2NhbGRvbWFp +boIKbG9jYWxob3N0NIIXbG9jYWxob3N0NC5sb2NhbGRvbWFpbjSCCmxvY2FsaG9z +dDaCF2xvY2FsaG9zdDYubG9jYWxkb21haW42hwR/AAABhxAAAAAAAAAAAAAAAAAA +AAABMA0GCSqGSIb3DQEBCwUAA4IBAQBObbHtMsaa0XTJAlJk4DE9kHgZoxF8ImFI +c1huhnCr2X+XkKxYDF/QUA1XRDWI9S4/6xBDKZdD+RhZ6ds3CbG4JVtoJa1Vvjla +dk11uirkKCqbYrdyc/+KeLS4ruYhG/JoqycTp/G5aCrThZgIgf0jm4peJwd9nqaz ++yjP4L4sDR4rfdLIsk96hPKDImD+5uuJ9KqMj8DO589uqJwhTehfPcNfL4hVdQ66 +IEKK6HM5DMXYzRFr7yAseKZbXngn5QJ+ZBldikP0hgGFYbT1kbNtFOqwpYNvgGvr +ptei46poM3WCB04puszm62E4Jora6rxaLwWGp+6TWELLwUUs9so7 -----END CERTIFICATE----- diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.crt b/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.crt index 337d24e2493ac..9111fb215a448 100644 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.crt +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.crt @@ -1,19 +1,20 @@ -----BEGIN CERTIFICATE----- -MIIDIDCCAgigAwIBAgIUNOREYZadZ2EVkJ1m8Y9jnVmWmtAwDQYJKoZIhvcNAQEL -BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l -cmF0ZWQgQ0EwHhcNMTkwMTEwMDIxMDMyWhcNNDYwNTI3MDIxMDMyWjARMQ8wDQYD -VQQDEwZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCCP2LE -nws2+ZIwSQ3IvIhVfrueUmNt7Y5TdhhwO32p2wC4ZA62J9L8klAzt7R+izcL/qbF -65inbXM0A7ge/2wZ09kbqBk5uS8jDetJS8lQmWVZDHfVi8g/yDMWklz2mQYleYmU -HPyIplai3P3KBoT8HurzHw2C953EZ2HiANFnGoEPZZ5ytcT2WenxuU5kSXSxuDyn -8/dCVHEQL1Yipr2LQKYQAHotjo56OhyL9KS5YPjzSFREeyRfQinssTmpGFsua/PK -Vqj+hRdkaqRfiqPq3wxn8oOSpZLQe58O1e7OlqgjkPuZdjZ0pQ7KJj7N3fUQNSeg -2VC2tk8zv/C/Qr2bAgMBAAGjTTBLMB0GA1UdDgQWBBQziDNuD83ZLwEt1e1txYJu -oSseEDAfBgNVHSMEGDAWgBSU68vXWuUWW2xC2AmllVRZZUpvAzAJBgNVHRMEAjAA -MA0GCSqGSIb3DQEBCwUAA4IBAQAPpyWyR4w6GvfvPmA1nk1qd7fsQ1AucrYweIJx -dTeXg3Ps1bcgNq9Us9xtsKmsoKD8UhtPN6e8W8MkMmri+MSzlEemE+pJZrjHEudi -Sj0AFVOK6jaE0lerbCnTQZvYH+J9Eb1i9RP7XHRShkR4MWgy2BzlENk9/LRbr84W -Yf5TuM9+ApiiiOoX9UfSGBzNnqwhJNpG9yJ+HnQSqTnJJc/wL0211zLme9I/nhf0 -kQx6mPedJ3gGoJ8gqz38djIrhJDxq+0Bd9SsdlR6yT+1+bY7hinYx2eLV91AybZ4 -x07Kyl174DD41PYaE1AtoLlrMrQ5BG7Md50Am+XXOR1X1dkZ +MIIDUTCCAjmgAwIBAgIURxNp9ImDloxqOPNAP0ySBZN/BDQwDQYJKoZIhvcNAQEL +BQAwHzEdMBsGA1UEAwwUT3BlblNlYXJjaCBUZXN0IE5vZGUwHhcNMjQwODI4MTA0 +MzUwWhcNMzQwODI2MTA0MzUwWjBiMQswCQYDVQQGEwJDQTEQMA4GA1UECAwHT05U +QVJJTzEQMA4GA1UEBwwHVE9ST05UTzEMMAoGA1UECgwDT1JHMQ0wCwYDVQQLDARV +TklUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCp7qyGufu1cQYWJJGZ04XulVdwsKytMeLNSDHT90ratfsAy5WP3CRy +fug0E6nB7eykSHnE8aYomrghJIL0oP3v7b7vV/iasZ17Q2uiY67fQb4s6Rvrcov5 +R7ak5/B22uslDrDY0BaSWKCxHREb55rMhVWlVTXpm91kdGvo4Q61Gcxe45mweKR8 +UMbUlNuXrW/xwTwYI4pdDxha2ZXgTBrBJXppEh/KQp0rdy4Be3KG5IbqrH/Bh6cG 
+4CZ/di0i6xWxAhQOlOKlcTHpMAtXx0eBjha/Y9+p3/7z9fmE/JsYozw56r75CPDG +VpNiSDoPMPed4uhpbXQVYeCTUe3Hh8WRAgMBAAGjQjBAMB0GA1UdDgQWBBTm5Cel +/aWnBGFDUnZKNYs+BVFHFzAfBgNVHSMEGDAWgBTjV81VkxbQpzmea/neQpMg1lPQ +IzANBgkqhkiG9w0BAQsFAAOCAQEAjaXJN+NyS74cDTAtjVqo4e+h2K/LfYyIpdYp +mTDi+wRBlprJUDl18TK26c0hV6T4MN8QxqoqCXoEVJZWDjBYOUsl3OfSgPpT0aww +3Z/mIPOLb9mR1zOO9tXZhgNdFCLRRepiLyPRsRVQ3K3klle42DHaEIOUlwtqAArF +d9MKg9PShrRjqJwlm8vL3E8KjNeC8gAvebF3e7ADIatXjRK5Rc/LQhgPCaCZKSDF +w36AhGBnXsCgi3IR00E9CWOsC2UVeAhgHHaN1oJjuLfFupG/2Vx6Ii+PAgueE7ec +VWQeasxHihc0VjEYtSiNlYO6A8rcH7lg+0OCzGr97DC+zfFZwQ== -----END CERTIFICATE----- diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.key b/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.key index 95e11f79cea24..ca0c6ba868047 100644 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.key +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/client/client.key @@ -1,30 +1,30 @@ ------BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: DES-EDE3-CBC,81AB10154C04B38F - -0L6Buvpeg6QHh/mbYp/3bXDCsu0k0j5xPdIGWd6NCOdb24OQFsOjeA2WuPqs0WWF -gzVrjh984biS3IqeglEr6X6PfVJ0QOgBkq0XgSBXhuoRJL/302N9oPGsf8T8oW9t -pqR/JIB2L7lMbJlJYSjMl0YQT3hWpo2BlrtSIc/GWOKfjDNWc9BL+oHvKJwql1lb -n4yMvYFYJDqgzgxa3r4IIQNsCn3SP+gqbTx9vF6StOIroV51BdSL4IGWRvqnMJrh -ybk1EHSLR1oGcONLU4Ksi33UxdImG70SsnoH/NnInDvV2bxmxmgf5SfYKtxFhoxz -0hISKTMTerPGtRQ5p8wtEi/ULKyInK+qF3tLgZa+S5VbByjDnUo2dCcbDDSkH5pO -uczJ2bs1kJegpCrUueJdbi9OX2upmF+tJb9+5hzFTvey8dUWTEpdiN0xbp4BLfNd -Yp4sMHZovsDJKIjDb0NbXRgLeFh1ijlLPhKwIXWTF3BaCKcSw34Qv22YPwn3qNuw -0KuUPAo0B65R/hoJguvtks8QAXe0S1jZS/fAlQCoIB0TIduy1qkyje+AnSW+1RL0 -ysBxLqbvRUqWlgnu7/28V4FD8JNu3O+UGBEelXlfokLgCBZ6lSys2d3Zy/XVBnG0 -cPl59if+fxKaMWlhFvMLFBup1Y4a/1zA7Sx6kkhvawekHr40NcG4kLHJ+O6UoM4d -/ibnbfIksLNkuo/nwoEcKp7W6SxafV0hROdxClkGKild66rnHtk4IGATjaBqt9nr -FuO3vRtLuUMS+/4kpvhMwl0RhX2/i6xgV+klWNYNu1JTGDFvdG3qfiY2w88EIbGe -rn8JEvRtaH/XNeGdhBwbuObvTifiHyYzA1i5Zh8zvE2+Dthlk19jbBoOUx//LOi2 -JrNkAsqQCF4HXh7n9HWA/ZrKTP7Xvkig6Vf7M2Y/tO361LSJfzKcRFLpl0P2ntEv -XwFOqTvOURERTVr4sBLOVPRAhIs3yvkI5xfurXzbRWtSeLgrMoDgJlXIQbuXd8sq -zIBLqvYf2bcroB66XJqX1IFWEstym/NHGcbrwjR5Fn2p3YAtXnIbw8VhHwV+LIOl -ky/wH9vbnML/DE81qFqRe8vNZw2sGn9skOyU/QvKeV1NRHYZSV3hMx82bPnjgFeB -ilzkb8FEPOAOJ0m44Q3C9eUoazJT8aCuRIAgSL43se1E2pFlIXQTfYRARaWEkSf9 -0hXqQJc17b+Hj0ire3PUqbG3+/l1qMhhIHwq7Kuyy2neTuW/DXbXp2AMv/bLcnHH -apVeRZaYXVSnGXJNk2CeRnCs8OGir8g5zkH+fmVb9knt6TL2oFIsQqULyrLolhfe -6Q8mLzq/sd+w+VuN1n/5+RQqOJZWEkLFzQPx8wTqeTB19OE0gjncrqzCHq7INqRe -tGClWOj/yL0Sciu3ctVGz1VAbgeBKnLdKm2TX4oFB4OG4E7GMXIL7hGxjtjLAVMW -XNc3ZYNQra+iPqJtFxnmbrF2Sn0Wr0hcAT1V0A0TRKe/n0lpUrfhTy/q4DUlOVKG -qdCsTGoYXObpUWU5G9GyCVWWRJyrTxJcBZ9KWJu9Y/aMFzoa2n0HQw== ------END RSA PRIVATE KEY----- +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFNTBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQO04hOVF1REJsgAkP +xkFZ/gICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEENoXPnjByIDKjwqz +3+WRgNsEggTQuv3EOfjFwF8f0fac2GjJJxN3L2b88CeKxbjTL/6kQ1bvWSI1+L45 +0zP6CQ+5lI3N9/0YFoCWX5y57e+OXafAWivkUp/LiGkYWcRnqGVhZgSQTFQP9rly ++3PUDLlM5FuGylKvoqYmTIBud1puBiChYj0FKImOyHgPH3/GEGbTSrtvCSZkCw72 +XkkF32/OtSbqTuGlGgl+pGLTtnS2+RhgiCzXMCtvHJqjhAh22J7uoYYqk02QKEme +GMWM4anxmLPBr/Rw04NrlEfgRl8mTIhgrgwKV/mwfK++kqboWpzfXPs/S4KHJxmv +WvVcxHovoyovBA87C8cY4Qz/PZzm9vZr/+hQCF0OJgvZejWiUiuRJ9HgeteKTEMo +CrOlyZXcaMHPCa8CK6U+lUBwTZbAAzMYSazfaf8524yDGksOA4J/KGC3uvviYW09 +hTaqhq0yGqBUe5mrgEEhSV2vIpjK6MKxMtvjKvc1fjfrYIL9BGiiHOCGaljQTQAA 
+yLZqQwlj//v4om3onR6HOfZeYsQxzH5zNFSIJa96/kBBWG9Q0ZMmqEqB52rNUT28 +ZapjaqqRkos/rBdvzDQzlyx+NjZnOsueEkC+cX/1psIoE+6vLbonMrlzl+SSqtxB +EuSD7dekZ7o3eQLzRI13ohRtzMv4ojWMpr769WsQ4KKflK7pLVdIYFZbL0Q44s/w +Bc9ByiwSGymhEO6uqqfBT1baj19yTrc3FU/jaJyIsRNs/EAc7c6nPejiiwxtE7Ex +oVSwbKoD2CXB/DYlenenBGvuP1jyHSkQqv2YWdL1bm9Rp8DNJ+HG0OP913fTuE3V +7ScOt2ZnR2B+VWN3Eu8MdiX16vi/ub/4H1HihANw/W5HSwuW88V7fGcbSzRWxyCN +5Od7b5y2zAD/tl+x4GXFZ9k+di2sZc7W6zzVqHr55nfxvsFvHt5dWipTxZFdVhRh +tXhGnYCfr1gKN4FdTW/MuYa3otHL4gVpnVdQ10C48bCljCaVdep/AhC5dj0GaTyx +VJBzzD5vp6zt6jsfjI059+zVyR5zxhEKeotURVTqzhz08TOHCkyQP0KRQ+U5ve80 +9cj1odt43JBXFq5w9/aUQWG6ZnBJQup/zlDdGncPd0+3Eh0WoQyDh/XlFosrxt7L +QF9SqN9oTIp9Fgr6yOFrDOamQAb6f+5Ms5XNegHmlqSkGcpJxf2JBNinrY4drrQ8 +GuVCQ94GhjdGMdSM8Vv8Yi+8RHyqn6R2hjiY4PX+86J+xFNOGr5RiXk8NUp5kM5s +ZfffpB0ELlgBQzEv2PV9hdh66M8EGjyQl4ItzXg3JhbiXOKAQLbpPOD22zcZsmm2 +r5E4vgRwYfHnmwqJsrIcvMK1m4USlGuwJYP5ExuwE4xdsaUNwKEd3gZAXzhV1YKn +HyBfJFwYJsBR+l9G9kt/ZWpEd2DNnfss7ujQYTHGQ6WT1zbKbCsb8aE1CNXXs93C +DtuMUvG+BRTwuSAtvWTf+XPcTjgTrrAKQq2tmsbDe3CEgW5r/4+OL6s3nxI/mVVg +4jOcUZ0bePBvu+4/jIRqlx2MZIFRp+vvR4RiQ0wYBcihW7Wed8y+ZWdHxg6eUlJP +WXwdmXsz+NFMXpJvBX0OgntVzxEdJAyGEeBArBJPAKmcbR3JfDWMQ8M= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.crt b/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.crt index 309ade87fbd78..317991a707a16 100644 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.crt +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.crt @@ -1,22 +1,20 @@ -----BEGIN CERTIFICATE----- -MIIDsjCCApqgAwIBAgIUXxlg/0/g3UYekXWBRpkHM84EYfIwDQYJKoZIhvcNAQEL -BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l -cmF0ZWQgQ0EwHhcNMTkwMTEwMDIxMDMwWhcNNDYwNTI3MDIxMDMwWjAPMQ0wCwYD -VQQDEwRodHRwMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAi8VQaSR6 -uqgT1Rkw+a39OSXcXuhJBVdoO+AyYPK7hdUTxj1aqnXkKeAiNGpe/J+uXZ837Spy -rmBZS3k6S5hLEceF2xug8yrR7RYEZ+JvGlRgg/jj+61gGbHAD314+vvu0YUo06YG -wbz9AnjJA/sMbsCp3iSzWIkwZBZcCoZ/YsG4I89LSjYL3YmRi2193WMX6/OfQYMN -Fkv61r/iwBEkgJ14cUSYe3norGuQfZuXSh5kI5D5R7q7Bmb0um+jzY/l62kj3oR1 -YWo3g6DdU/Bc/3/KmEEVXIfdTonMBMyL8PvYORoMKrYdph3E8e39ZQhPeBJNJKw0 -XzsZFzIUlTw0kQIDAQABo4HgMIHdMB0GA1UdDgQWBBTiqknjZLa5E1BneHRvTkNa -Bm4nNTAfBgNVHSMEGDAWgBSU68vXWuUWW2xC2AmllVRZZUpvAzCBjwYDVR0RBIGH -MIGEgglsb2NhbGhvc3SCF2xvY2FsaG9zdDYubG9jYWxkb21haW42hwR/AAABhxAA -AAAAAAAAAAAAAAAAAAABggpsb2NhbGhvc3Q0ggpsb2NhbGhvc3Q2ghVsb2NhbGhv -c3QubG9jYWxkb21haW6CF2xvY2FsaG9zdDQubG9jYWxkb21haW40MAkGA1UdEwQC -MAAwDQYJKoZIhvcNAQELBQADggEBAIZr8EhhCbNyc6iHzUJ/NrUGht5RDHUKN9WU -2fd+SJlWijQYGoFW6LfabmYxIVPAFtYzUiA378NFoOZZ4kdC3gQng8izvS2UDcO6 -cAG5q/dxop3VXqcLeK3NpH2jd83M8VZaOThPj/F07eTkVX+sGu+7VL5Lc/XPe8JS -HhH2QtcTPGPpzPnWOUMLpRy4mh5sDyeftWr2PTFgMXFD6dtzDvaklGJvr1TmcOVb -BFYyVyXRq6v8YsrRPp0GIl+X3zd3KgwUMuEzRKkJgeI1lZRjmHMIyFcqxlwMaHpv -r1XUmz02ycy6t3n+2kCgfU6HnjbeFh55KzNCEv8TXQFg8Z8YpDA= +MIIDUTCCAjmgAwIBAgIURxNp9ImDloxqOPNAP0ySBZN/BDUwDQYJKoZIhvcNAQEL +BQAwHzEdMBsGA1UEAwwUT3BlblNlYXJjaCBUZXN0IE5vZGUwHhcNMjQwODI4MTA0 +NDE1WhcNMzQwODI2MTA0NDE1WjBiMQswCQYDVQQGEwJDQTEQMA4GA1UECAwHT05U +QVJJTzEQMA4GA1UEBwwHVE9ST05UTzEMMAoGA1UECgwDT1JHMQ0wCwYDVQQLDARV +TklUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCk1Ot2RGbUS3yJchvdtrcGJPoR8cTTUfVVMMRT+btXayllbLQd/cHV +jP1DxauXiLQs77R3NGfPs/Sk7fGQh6p/4F52F5wlNqG/Hq0MquqjXEo/ey8i+p5Y +zTB8v2Hv6RwN0HLB2uiAUOWjHvddiz36nfPmQ5jlF+IsR36KMb6AWHaB60kUabZL +vPOrtw7KZMkHRC+3tXvvepNe3uAKTIOEeHJneNNc76ShPnjANev7ONpNHgvMTJDY 
+nbNtDL2WnHvnyEwIgWLOnJ1WgOAsiSpebPqibi+25FirFKGTB2qp2NfU+tCoK7hG +1nPfPSCxBEqhwoJOywft2AxhDoicvo+HAgMBAAGjQjBAMB0GA1UdDgQWBBQ2Dr4v +2/aWi1JSmXfRITKOTlwa+DAfBgNVHSMEGDAWgBTjV81VkxbQpzmea/neQpMg1lPQ +IzANBgkqhkiG9w0BAQsFAAOCAQEAXEmxgNViixLWVQx9EgWscxaiI4d4OFd7Dfb/ +11qRtKoobEuSK5lOhDim8hZfs+iueKHuT/bRJ59Yu/p4GS+ZeJRgEXfCdY9S3Zeb +qGCi/IBRT1oq4vD3OSWA88C3I+pGXRb7R3fvtIcfy42o1FdHAg3MOlRx7fZHtAdE +GJ4SRsKTex7phWvKZ14R+wj45B8dA8Ty6/6nzPqb5+SLa5w37jU/gdew2cW2lEaN +tZb/aj1l5LmxXje3mvVag5SR2ussDrARcRu+uW7qYq0IzzQDxyzwpEWPC/QsgEme +9GFPd3xNu4tSoM0arrK8xjNtEh4P2gokhNJwy+vDGvKMrrWjVg== -----END CERTIFICATE----- diff --git a/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.key b/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.key index 8b8d3b4083c67..68b61c6d6e03e 100644 --- a/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.key +++ b/modules/reindex/src/test/resources/org/opensearch/index/reindex/http/http.key @@ -1,30 +1,30 @@ ------BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: DES-EDE3-CBC,127A4142FA81C5A1 - -dP6oSAUl47KCnP0YZSX108qcX5s2nVGpD0qtnVQg89mLVFd7IxpKQaIuODSadRTo -AD0KINITy3ZwUr/TTJgERu88baBsTHv3PLEe7TpQI2DGGDz3aZfO9e6Jvglbdi5b -CBLaxRXGGhhH9YH0E87Lp3JEwg4udWmlNahGIhbqNheZNTtDKt+Lx80TyyIml2r/ -GAhjT4UPvIRrATFAcL/3EKOjRqvb6SeGnZu21n2TSmsBEr02gC0Ox3qmsnRM3kvU -jCuUzWTzJSQLXZwZuMtv5srOSFAbU8EklFXNhWJU/7GBy215aAAW48hCzkPMVEbg -oeH4nuze/Uulih9UxJGCBIpvfTnksyMRGP/zdy1mnKuqQk+yI0n7JWMJL8QoDQc8 -XvzqOmKLdBVezmzOVP/PyMAhYWetILh/1UesjyJot2hwSXPAxqBHPVA9bnmel6CQ -VccNSwaK120yT5YhkUMFc0AmUpztzNMQzJ10g1dW+Qsr+n4vtFmAuTvBgogNNVXn -eX1hbbiXGO1Fw4OMu6qTJ4T/P+VFb0CxoxETWeqdjcs4LGbeqF68nayEsW0ZzhbI -W5c+JAbW18Kb+k/KzKZTtJEXBw6B/2FMe9x9z3BIpVhplM2KsNk7joWnumD8LfUT -ORRHUPV7bkdiDsn2CRaevubDQiChcjsdLWhG7JLm54ttyif7/X7htGOXPZLDLK8B -Vxe09B006f7lM0tXEx8BLFDNroMLlrxB4K5MlwWpS3LLqy4zDbHka2I3s/ST/BD4 -0EURHefiXJkR6bRsfGCl3JDk0EakcUXM+Ob5/2rC/rPXO2pC0ksiQ2DSBm7ak9om -vlC7dIzVipL0LZTd4SUDJyvmK4Ws6V98O5b+79To6oZnVs5CjvcmpSFVePZa5gm/ -DB8LOpW4jklz+ybJtHJRbEIzmpfwpizThto/zLbhPRyvJkagJfWgXI0j+jjKZj+w -sy1V8S44aXJ3GX9p4d/Grnx6WGvEJSV0na7m3YQCPEi5sUgr+EMizGUYstSSUPtU -XhxQRZ95K2cKORul9vzG3zZqqvi73Ju5vu9DLmmlI00sLzyVGFtvkuhrF2p7XclM -GU/rMOeMClMb6qyCzldSs84Anhlh/6mYri6uYPhIGvxqtH44FTbu1APvZp0s2rVm -ueClHG78lat+oqWFpbA8+peT0dMPdSKDAFDiHsGoeWCIoCF44a84bJX35OZk+Y4a -+fDFuSiKYBMfAgqf/ZNzV4+ySka7dWdRQ2TDgIuxnvFV1NgC/ir3/mPgkf0xZU5d -w8T+TW6T8PmJfHnW4nxgHaqgxMoEoPm8zn0HNpRFKwsDYRFfobpCXnoyx50JXxa4 -jg095zlp8X0JwconlGJB1gfeqvS2I50WEDR+2ZtDf7fUEnQ3LYJzP4lSwiSKiQsQ -MPjy0SMQnqmWijylLYKunTl3Uh2DdYg4MOON662H3TxQW8TCYwK2maKujwS9VFLN -GtRGlLrOtrOfHBSwDCujFjqEmQBsF/y2C6XfMoNq6xi5NzREGmNXYrHbLvl2Njwm -WB1ouB4JzmEmb1QNwxkllBAaUp1SJGhW2+fYOe0zjWOP9R4sUq4rRw== ------END RSA PRIVATE KEY----- +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFNTBfBgkqhkiG9w0BBQ0wUjAxBgkqhkiG9w0BBQwwJAQQprhRDFFTnmWmHgAB +ULpI4wICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEEEuzT8itQgHZfKb/ +ReywEdIEggTQD117YFYRhSSivErIhTKQSuofhH/ZgW6nYnKlcDT08bgNQjbEg94a +QZqsPl9D6tfcmg7XlNTEiQpnSnsh6LrrhQbNkt3PvJxfUUy0ATVXXdH538RcPLAC +K2NHi1iwSbnqdcBU+/Be8M1F9e9P5hx6HbJGEF/JIkpWDDmOoCGvlwfH0PSiliY4 +uqxsmekvNgz2GBhELZj4sEJ7C7/I26vOuzS6suDn6xGF8JZIg8i7upamUgLoBtG/ +waxlmfTx+hkYFDQGcy9jvkV043sK/hLTOycUGhmS1ybQSf9ANbsM8RjOIq6QxpIZ +wtV/7EzqDWYradQBRrhAP24yzEj6H1cTr8yMmD6JuxvGZ7uQpTCRiFopB6TgK+x+ +2HqEgeRyBz4hU0i22kyGHC9sSG9WwKhmXhfcBtzJi3JABbkeg9LarwOzbh51DaxN +/gTop4UYRTYbJB9bhcIU0Y5xPSSphphCWmGuBU6CinsBj1w+UBP137GzgnXvV6PL +S8tai963P38Oafw/T2IyFTyAkuHJJ5MjVc71Q+vYLzfu4SfBdSIb1oFPT4otNwHP 
+NbPvTYq0DWnHFNeIc5vmLJJTWVemBTkxvHr+WfU8meFsjxZT05gzgOk+5BZFya5h +oV53mYQYPSyJiBUz0icHyyzUWaEHQLXHrmE6i+kW7+b4lrhi7KV1AMGRSJXUS9/Q +I7NuCQG3+iCyMd+CupvsiK7xjOytgCstwWIGeHlSmYwS+txi1wpbBJ4X6NQLlHyy +KZoFxyWTKtEdX1QKioBxeoKVy5G5LOh7S/jd9jEsZ2C8snFnDbNHALBmXIH3fshA +bo4keel427V6W3f9/u0nT1RWrYiBK12XJiS3/kXg8krln1Xb/MkgTKmLEZF+VDXO +Y3QwAICNM6/235siHuQG+uJ/WoL9xd1R22/+2mxNy1Rdhd49n8GFg0Kjsbmd+hL9 +aMwRU09SNNPCwdAIHmoMCIYS6uTX1bcGSzMir16JepmIYQllwdOoLk2nxtBCaHwj +ZLYO21W+iFgo4TwXzkuaI2q3Ll0n79BJUVdOnz8hBCq0Ox7sTEY7g1vQGHIsBx98 +PYZmaaXVh+u2chHKrwp6L9mRikXQiNWwtqTH/kp7BydRnYIcaP27SCM8HbaYfV/x +02FjBbpZ7u1PwS3jlGmcxE/qTd+cLkk3pm7WPPMlOnMh/X5N3/OpznUgJnVRtGqk +uDy4HSE5vEhHDp0F67R0ph8/HfIBamvJIoonYzoC2iEMgL4yqL0x44SOCioXScgz +hluYX1kQRfyXWjoP+vBBOUapwYDwk1gGXap5iQjtiVq6FN8DspckHRVI5B1voVIC +37Mn2OXH9JloObouLYMRa1dDm7h+/3Cb9UAhKpOjpLc1apA49+Rjtq1gBExhac74 +9SwrcQJdRx0NDJjoMHKrGUFkg/W+R7OTad7+l98M473nWuV3mzJDXcuxmam9llRI +2O+1QsV5hjd4/zCtIka+pOALp+cVSmktTjKNh105asX7d4XIxtg3M+FJWTEODZfy +VulvKri/rkrbCBwMQyj3TpF4AkVjhSM2P5j7LRsivfGc8VL00OqYJp9pYfav38gs +EpYOmaDEV/Ls744WSJJo5Qq0EpDclBTFjky6kZx7RDfySUzfN/Nhv6A= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java index f80ad901ce765..563f89b70545e 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java @@ -127,7 +127,7 @@ public Optional buildSecureHttpServerEngine(Settings settings, HttpSe "password".toCharArray() ); - final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509"); + final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); keyManagerFactory.init(keyStore, "password".toCharArray()); SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory) diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java index e0600aebd90e5..e573a9d018862 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java @@ -83,7 +83,7 @@ public Optional buildSecureServerTransportEngine(Settings settings, T "password".toCharArray() ); - final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509"); + final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); keyManagerFactory.init(keyStore, "password".toCharArray()); SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory) diff --git a/modules/transport-netty4/src/test/resources/README.md b/modules/transport-netty4/src/test/resources/README.md new file mode 100644 index 0000000000000..50cbd432d32c6 --- /dev/null +++ b/modules/transport-netty4/src/test/resources/README.md @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# +# This is README describes how the certificates in this directory were created. +# This file can also be executed as a script +# + +# 1. 
Create certificate key + +`openssl req -x509 -sha256 -newkey rsa:2048 -keyout certificate.key -out certificate.crt -days 1024 -nodes` + +# 2. Export the certificate in pkcs12 format + +`openssl pkcs12 -export -in certificate.crt -inkey certificate.key -out netty4-secure.p12 -name netty4-secure -password pass:password` + +# 3. Migrate from P12 to JKS keystore + +``` +keytool -importkeystore -noprompt \ + -srckeystore netty4-secure.p12 \ + -srcstoretype PKCS12 \ + -srcstorepass password \ + -alias netty4-secure \ + -destkeystore netty4-secure.jks \ + -deststoretype JKS \ + -deststorepass password +``` diff --git a/modules/transport-netty4/src/test/resources/README.txt b/modules/transport-netty4/src/test/resources/README.txt deleted file mode 100644 index c8cec5d3803a4..0000000000000 --- a/modules/transport-netty4/src/test/resources/README.txt +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# -# This is README describes how the certificates in this directory were created. -# This file can also be executed as a script -# - -# 1. Create certificate key - -openssl req -x509 -sha256 -newkey rsa:2048 -keyout certificate.key -out certificate.crt -days 1024 -nodes - -# 2. Export the certificate in pkcs12 format - -openssl pkcs12 -export -in certificate.crt -inkey certificate.key -out server.p12 -name netty4-secure -password pass:password - -# 3. Import the certificate into JDK keystore (PKCS12 type) - -keytool -importkeystore -srcstorepass password -destkeystore netty4-secure.jks -srckeystore server.p12 -srcstoretype PKCS12 -alias netty4-secure -deststorepass password \ No newline at end of file diff --git a/modules/transport-netty4/src/test/resources/netty4-secure.jks b/modules/transport-netty4/src/test/resources/netty4-secure.jks index 59dfd31c2a1567c6fbae386aa8f15c563bc66ae0..d158f1fe60ef73fdaa610ce8e92a398e4212f20a 100644 GIT binary patch delta 2521 zcmV;~2`2XD73LL?b`-2bsR&Z_>1Z`R1*Lm6UWDR37kraH1|)yv&4@(E?*;s*kwb$P zLCov~f&|b3yv&SmU#EP@XkC5rYDlv`1$+I0m+fF03Zn)vEv;8qtK>O%+1GjAQbr<- zKa;e@BY36x>$Uov1e4am_lJ zx32X@sL$FlM+QpRy}#{%I;Tryx>wiV2Li?ITv`l&SPa0tlW%ng_f3>FzRQD4RQ^$} zg&L4Ao7oz4?vR_Bf<{c^)#;o$c==|EhQ$4=34~p*!1bLOQpHwdgZ{d)g z3{+5L&aiY;c*ftSi)0`;5g~9h`2S`*YsrLzT{eG1G^-q41vwByj7{|qoQa>Al0|U& zJqg6^^*~Q2YBiEvZ=RVpPad!xc3UgY4lQM?t!K)6^X3H9XJbr(Fa2PLUSkQC-81Z@(6*s zI)i`mb*Y+e$WWPp^mqVX6*trZlsMUK1~sVGyk%^Cha+O}q%>1OCd8&ujh@rz(evH1 zla=ptb!KV&X$l|9tENlY6+GdExCt_xFB9$y@RQF=D#7 z`P8yqpWP3Wpu;Z$BY;!H;J3^xWN=5c6M8$d zRo$152a?5dd)u&5X8P(U!;4=n$ipmisagF_hnEty;=5d|Z$yGT^r+~O#TmSWL5~*X zjdc%fCK8#;@e*KpzMy zF-|Zk1_>&LNQURbYfgE zF?W^E_Q+wAKL#X!23am>aNyhp*U{tS*0M6SVGTt# z2yLOt$_*l;Jt_wQQf^w;c=fdx(wD=eK2(q36JgXVBy}Blu_<_tNh|kU**D&8j4sCj zAf$+2i3C9L-e+Js_Vyi}m0=6*F&s;ALbaNYsLn@!Dtgo#(#e#$(m=Xb)xB%*kvqKD z8EOI$AD;ky-jkx+4)CB}3~vi-_4#VlG8xD^@I!#YKRy}zE|HksPPp!#e0V_j3je_W z_x3ht^}~r=4SwZ456eU_u8QWMXyaR+QM^*?rb60% zY(KGzel-wY414%K}=jM>hV>7 zu$*ir*FtxosGimZlyjoWW|(f9ie{~jX0kR$j_tWuOpDLiJvdq^2(3_*5=UNS681VK z92S^Tb3ugm;AfTr2dowG6~d3vAqu;`I{hVq@gnXP9KW#yNQyoPvP~eaFjJ z|CVUhl(X7S_2{T{sHWkcGnQ!Dq&_yJY|63t-p*x*qV@YL80MQQg+P-@e;~Si3_&lu zaKqbG6*rL{mi~?KUW-gzi+fSHAu0Fv(#{hY@+M>qTY}IYBr%VGJWdmrL=ne-c;5C$ zlYTyTOeyRWQW8(X#l+u98+b(L(#8{Z2`iqp2flD^fO6^-4f3OwLD|11u zn)4LMua3hUK(p!-gEWQ`S3)srU zpJ<2Y5)&>6_fp^xp+6jUFRE*OWAud-HTe+#>>!(~RT_JdXL>TE?pR*zWMG#4CHB!N z_C6Sq!*VUcfFXEat=gG_QQ#p>uar$B4k)Lgi*U$KdYXShC-=o+?!hBRL%6Q0J}H{S zJ=>!2x||9Ui?_^!GrrSx33MB`1}R}fR{R2ceq6x{Ya`59VyHsy^c z84YMGyJmkjr_xFMiEq4foE0iFGe{D4?)*e^D~BO`MX2t~r2x?Sr@Gz`Dfgydfx*$ZR3p?!g!`7o`PwnLW-ueZO{X=H*>_WgbV9@VCry 
z261uU34P$TtpEzk5vIgS#l6RHJF_ACeaB(i+!7lnm*(*K`V-LRUrr+v3d2Zmcfs4; zy5h6yjfx#?c2IwH(lvj8I^AGC7IA}k-9dkEx)70|w)}T(UC+Irr+zvt;)*RMNHI%S zwOwc8)?mW?+m{9v?JIT;y#Mate6-RLz%8}Yc%Lp_F>RlhJ`-nBI{@k+rJ*)gR5xRC z?RH6(Un~F`1xIM=P?n(B*ostw4!B+_Rb1r z<#000*@bbuo#~&IisrJ^5k$(@a0Qw$TO_f!f%UsYS?D`E`?zeltcQpXdEkBP-mcxV zSJQ zWHSyZIFuc(NTk!?c(HOUMQYLEPToAhVi_K0jCAtV;B;^HZ59w`V&tz}4o1<0eFrLu ziH;A)*G$AZzZ-?t-AAk&L<$OTiNUbsmM75uMNu9|YL0i#PB!`NR(d0o)O3FoUr)K- zn?SBH8F9kOeY)XDUNv8C(A}IC9ikxn9u{V$Iv$bngmT%atO(d_n<)*e0_b7;tT2Xz z8E>c~ivpJjQ(zT;hNw~a!vq(-%!6^CiiV@Hh-m#nd!)&V4G-SU04BEfwqzYM0@HDR zF-|Zk1_>&LNQU1CwTI9!q)1Fq zBNfM9&FY+!KL#X!pYb3Oo6gAe1~KMGts78(fPw@d%hCVdm{lW5Q#HBwt<{?EMe>GK zN==RgaV*y)X8c~I+^8n)ZZKjg*;2_u-lR$|0%LD>Vs6{J^;_&*VIhHz>aV{U@idsC zs})6%^WpoI9Nmh+hd?a3hsvP3Vl0kLv24N7=SH>aCGQ-6+`#ra4|~=FD{tC`^g(md zJPmaFJhwl1#jKebxbPNWHJB~u<%#_;kglUlAWaz+@{i?6<(WvgW@4(fSwJn)`VOHZ zNPm3=2WO?qt6E@g*@bDP)$x`ncey3@YRuU`;VS;_rDFMPD_~z$z%l*QaAm%Oc@|ZHx_=pFs zYs1Cbk;vhc-?P3uO;Sgvnp2z88$frosus3XY$c5>l>}^9AqsPuSvO}J6!)&h9`DD! zD!gugZ^jz0{7a(YC`vsRO247p8w^&`_2bvY|OIpB%5q_gInzhH~;TOQsU zouVqm>V%P(;P0|M@bb<<&sLsL;%`O_~cC}Z3 zRy+bT+k&fIvDEN)1bK9AznNEVfQ&WuIPbvu5`_L68*hY_=-2kzpu=ULO716y!^s;t zR#n?)uiIsZ@a^+XW;O0O?PWwLmU@}QSE>E9{jEaI>5xccHKzK%?T!s0xzWe&PJAvY zk6-#6;q^s*W1%x_j6GD9DL$DKt3xDz(gQ9SL_s3yC_wOOO7u~Q^pY1DK-v3|Ge z3F0O9jB=s45do%OiY3bz9S4XhJsO6FY{f}WX)+=*T{8e@!_K@Onj;O*c0Z$=e0$P= zrSHLXFikKqFbxI?V1`HmWdj5P0R;dAAe8@J4!(Yu^PKN2Bm$lCa`TKfz=b#7jxnyU jYy*8_4u0sHGtEP%2W1{=gGb>DV(5!@AmxDgO;&T*6xdkhK>_?vBRbRC@E)X6$G{!s6CPLxV{$o`sV=~!EoWGu=nTC?Rh#N`U-puvz zLSvd`pYX?tERoss3zIabhY^YvBkx^^ZO>(ma+s;zrT$}vp)q7~x^TtW%wO0*)SLbuhY&`-Xq?-|G;P;*Tot}vK5czFRS`!DCzk;6S>*tB;V&2Td5jKRYdx8wc zM%$X~bAtI?rT5US-nl-V{qs6!+H4En2c7|#644UYFe-RZT7ln zQJ0xXrmIAWgJO-)7~xh7_%gq5MN)$2G!{}gW&P!n@Q*wC5fhj`#PwT8bbekNS-@slxl* z-qUT2H+8Q$X5^_((SJG3O0&>>GjS68uREx9g;mP7t)w>YC|2)4`wPO_py!W8WR!6| z(Z!55XY@D|;|ptak9Ey{YiU}kb6@FRK}S+%a-|1p=Hj7PKR(f=S6fak48jm)K*;R= zqrq!!u!cQO*~Yfs`tZ7%s7SKoE%UBV5T$|F;c!Xbh=D=fN2YZRSw7FL>HAV>dUg1I z)4LzPgCnuNuc)hUMZx3ej0Z|@l{BQBT*9d|sKl+sMrm8Dx=j1PAH$Q+cL%3sL`>2R z^b2xdn$JECm}3ebIj^G>e*rn-TqIk;h zm}v^s8LVfP*3XTZJoz3UlHXM&Bu?$HM%4F-sT<-*{6iwhr3s=lw{M;;{L}qGDLp;4 zi@@TG^9qdM*cMX1mAqNYb$lNd)z-a`AQlioyAzX`1Zu?G9G^GKHTk`UFTU%$-1D~t z9a;o~+`Q zs#A=)KtUkc3+1X9JGivQk0T|aUtf7^nZPq&4nmvj?~J5-DTf9=p?U)XI9`T5G?39k z~7JSWAa$s}dWJoO(qZhy2 zW|v+gOX$1VPd_8?aaOntJzad`Ws$%;a?X4wRIzhm!cQO!5UcwPVfWN^aCd?u_6meX za8O%;mYdb!ac`LcEIbi*Nn z$Z9S@e`hp5aKb*e)E|SOpB4JnsxTtlsA~4Nl3ZV%DL^x`{6rvYg}FR*k<-Y;0DE@} z#3(t98(=v;4g`mMti-iS1>F^h=!6TJUSPpDd^; z4=>FyAL;qIo_0>I%9BBBJX$ZcS+wr3LINU|0b((Efpt^9r|M05Ud^fLjrwGF(;=AA zQu$*$u1hX#^D)w6x?7?+P7$fRh2SI7SO>aNzQW zoBcYVH_KSps%LlKsd7?(3I^b5Ud>|DA){NrXnzHN<6C$4sOsn8ou03Sy(8u((5^J5 ztUI3UN(%@(8Bu|~ozmjpa@Jr|kKDL2cN<675UQRI%{w-|#-erxx*ulSFror-Br#eS z$#?wBEGjyzV%^2aYm4C-ai-ozTIZhtI$Ln0&e~HJN5j=3A@J%M!^GnIS zj#kIri|f+gbE?~c>t%Fen^`BDS1+B=o<<3iUwR#;A7hxkx-xWQ_^@Vp3BIDvCMs9I z#lToMM|B)F5MoE#Jdwq4;I5%)UOju|T=d$Ry6d9#>3nIU@1xWSL8PW4CU5%$*@nNrc!H7xDJPa zZ=&*(?WSq-Z4bpqs)909IIW&F< z5$VTtvMw)Qe(knHJUlFdgZTQ?{Owptksuzym^&glW%4O5`yta(Q#;q~bM;3C-0WID zWscpTVs08zdo~jC$1f>l9j^%NDz+Bb|ET^>EEvyDd!*Jgk%|P%W2>EMV&*T zQM`YCKM)WC03&3!eU%ip;M|@a7S@R&`86xqY05<34@ buildHttpServerExceptionHandler(Setti @Override public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { try { - SSLEngine engine = SslContextBuilder.forServer( - SecureReactorNetty4HttpServerTransportTests.class.getResourceAsStream("/certificate.crt"), - 
SecureReactorNetty4HttpServerTransportTests.class.getResourceAsStream("/certificate.key") - ).trustManager(InsecureTrustManagerFactory.INSTANCE).build().newEngine(NettyAllocator.getAllocator()); + var keyManagerFactory = KeyManagerFactory.getInstance("PKIX"); + keyManagerFactory.init(KeyStoreUtils.createServerKeyStore(), KEYSTORE_PASSWORD); + SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory) + .trustManager(InsecureTrustManagerFactory.INSTANCE) + .build() + .newEngine(NettyAllocator.getAllocator()); return Optional.of(engine); - } catch (final IOException ex) { + } catch (final Exception ex) { throw new SSLException(ex); } } diff --git a/plugins/transport-reactor-netty4/src/test/resources/README.txt b/plugins/transport-reactor-netty4/src/test/resources/README.txt deleted file mode 100644 index a4353cee45a97..0000000000000 --- a/plugins/transport-reactor-netty4/src/test/resources/README.txt +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -# -# This is README describes how the certificates in this directory were created. -# This file can also be executed as a script -# - -# 1. Create certificate key - -openssl req -x509 -sha256 -newkey rsa:2048 -keyout certificate.key -out certificate.crt -days 1024 -nodes - -# 2. Export the certificate in pkcs12 format - -openssl pkcs12 -export -in certificate.crt -inkey certificate.key -out server.p12 -name netty4-secure -password pass:password - diff --git a/plugins/transport-reactor-netty4/src/test/resources/certificate.crt b/plugins/transport-reactor-netty4/src/test/resources/certificate.crt deleted file mode 100644 index 54c78fdbcf6de..0000000000000 --- a/plugins/transport-reactor-netty4/src/test/resources/certificate.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDkzCCAnugAwIBAgIUddAawr5zygcd+Dcn9WVDpO4BJ7YwDQYJKoZIhvcNAQEL -BQAwWTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM -GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MB4X -DTI0MDMxNDE5NDQzOVoXDTI3MDEwMjE5NDQzOVowWTELMAkGA1UEBhMCQVUxEzAR -BgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5 -IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAzjOKkg6Iba5zfZ8b/RYw+PGmGEfbdGuuF10Wz4Jmx/Nk4VfDLxdh -TW8VllUL2JD7uPkjABj7pW3awAbvIJ+VGbKqfBr1Nsz0mPPzhT8cfuMH/FDZgQs3 -4HuqDKr0LfC1Kw5E3WF0GVMBDNu0U+nKoeqySeYjGdxDnd3W4cqK5AnUxL0RnIny -Bw7ZuhcU55XndH/Xauro/2EpvJduDsWMdqt7ZfIf1TOmaiQHK+82yb/drVaJbczK -uTpn1Kv2bnzkQEckgq+z1dLNOOyvP2xf+nsziw5ilJe92e5GJOUJYFAlEgUAGpfD -dv6j/gTRYvdJCJItOQEQtektNCAZsoc0wwIDAQABo1MwUTAdBgNVHQ4EFgQUzHts -wIt+zhB/R4U4Do2P6rr0YhkwHwYDVR0jBBgwFoAUzHtswIt+zhB/R4U4Do2P6rr0 -YhkwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAveh870jJX7vt -oLCrdugsyo79pR4f7Nr1kUy3jJrfoaoUmrjiiiHWgT22fGwp7j1GZF2mVfo8YVaK -63YNn5gB2NNZhguPOFC4AdvHRYOKRBOaOvWK8oq7BcJ//18JYI/pPnpgkYvJjqv4 -gFKaZX9qWtujHpAmKiVGs7pwYGNXfixPHRNV4owcfHMIH5dhbbqT49j94xVpjbXs -OymKtFl4kpCE/0LzKFrFcuu55Am1VLBHx2cPpHLOipgUcF5BHFlQ8AXiCMOwfPAw -d22mLB6Gt1oVEpyvQHYd3e04FetEXQ9E8T+NKWZx/8Ucf+IWBYmZBRxch6O83xgk -bAbGzqkbzQ== ------END CERTIFICATE----- diff --git a/plugins/transport-reactor-netty4/src/test/resources/certificate.key b/plugins/transport-reactor-netty4/src/test/resources/certificate.key deleted file mode 100644 index 228350180935d..0000000000000 --- a/plugins/transport-reactor-netty4/src/test/resources/certificate.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDOM4qSDohtrnN9 -nxv9FjD48aYYR9t0a64XXRbPgmbH82ThV8MvF2FNbxWWVQvYkPu4+SMAGPulbdrA 
-Bu8gn5UZsqp8GvU2zPSY8/OFPxx+4wf8UNmBCzfge6oMqvQt8LUrDkTdYXQZUwEM -27RT6cqh6rJJ5iMZ3EOd3dbhyorkCdTEvRGcifIHDtm6FxTnled0f9dq6uj/YSm8 -l24OxYx2q3tl8h/VM6ZqJAcr7zbJv92tVoltzMq5OmfUq/ZufORARySCr7PV0s04 -7K8/bF/6ezOLDmKUl73Z7kYk5QlgUCUSBQAal8N2/qP+BNFi90kIki05ARC16S00 -IBmyhzTDAgMBAAECggEAVOdiElvLjyX6xeoC00YU6hxOIMdNtHU2HMamwtDV01UD -38mMQ9KjrQelYt4n34drLrHe2IZw75/5J4JzagJrmUY47psHBwaDXItuZRokeJaw -zhLYTEs7OcKRtV+a5WOspUrdzi33aQoFb67zZG3qkpsZyFXrdBV+/fy/Iv+MCvLH -xR0jQ5mzE3cw20R7S4nddChBA/y8oKGOo6QRf2SznC1jL/+yolHvJPEn1v8AUxYm -BMPHxj1O0c4M4IxnJQ3Y5Jy9OaFMyMsFlF1hVhc/3LDDxDyOuBsVsFDicojyrRea -GKngIke0yezy7Wo4NUcp8YQhafonpWVsSJJdOUotcQKBgQD0rihFBXVtcG1d/Vy7 -FvLHrmccD56JNV744LSn2CDM7W1IulNbDUZINdCFqL91u5LpxozeE1FPY1nhwncJ -N7V7XYCaSLCuV1YJzRmUCjnzk2RyopGpzWog3f9uUFGgrk1HGbNAv99k/REya6Iu -IRSkuQhaJOj3bRXzonh0K4GjewKBgQDXvamtCioOUMSP8vq919YMkBw7F+z/fr0p -pamO8HL9eewAUg6N92JQ9kobSo/GptdmdHIjs8LqnS5C3H13GX5Qlf5GskOlCpla -V55ElaSp0gvKwWE168U7gQH4etPQAXXJrOGFaGbPj9W81hTUud7HVE88KYdfWTBo -I7TuE25tWQKBgBRjcr2Vn9xXsvVTCGgamG5lLPhcoNREGz7X0pXt34XT/vhBdnKu -331i5pZMom+YCrzqK5DRwUPBPpseTjb5amj2OKIijn5ojqXQbmI0m/GdBZC71TF2 -CXLlrMQvcy3VeGEFVjd+BYpvwAAYkfIQFZ1IQdbpHnSHpX2guzLK8UmDAoGBANUy -PIcf0EetUVHfkCIjNQfdMcjD8BTcLhsF9vWmcDxFTA9VB8ULf0D64mjt2f85yQsa -b+EQN8KZ6alxMxuLOeRxFYLPj0F9o+Y/R8wHBV48kCKhz2r1v0b6SfQ/jSm1B61x -BrxLW64qOdIOzS8bLyhUDKkrcPesr8V548aRtUKhAoGBAKlNJFd8BCGKD9Td+3dE -oP1iHTX5XZ+cQIqL0e+GMQlK4HnQP566DFZU5/GHNNAfmyxd5iSRwhTqPMHRAmOb -pqQwsyufx0dFeIBxeSO3Z6jW5h2sl4nBipZpw9bzv6EBL1xRr0SfMNZzdnf4JFzc -0htGo/VO93Z2pv8w7uGUz1nN ------END PRIVATE KEY----- diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 681ca0c712bb2..1720b21b45d68 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -41,7 +41,9 @@ apply plugin: 'opensearch.standalone-test' dependencies { testImplementation 'com.google.jimfs:jimfs:1.3.0' - testImplementation project(':distribution:tools:plugin-cli') + testImplementation(project(':distribution:tools:plugin-cli')) { + exclude group: 'org.bouncycastle' + } } // TODO: give each evil test its own fresh JVM for more isolation. 
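The hunks above replace the checked-in PEM files with a keystore generated at test runtime. As a rough sketch of that pattern (assuming the KeyStoreUtils helper added at the end of this patch; the class name and allocator below are illustrative stand-ins, not part of the change), a test can build its server-side SSLEngine entirely in memory:

import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.util.InsecureTrustManagerFactory;

import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLEngine;
import java.security.KeyStore;

public final class InMemoryTlsSketch {
    public static SSLEngine newServerEngine() throws Exception {
        // Keystore holding a freshly generated RSA key pair and a self-signed certificate
        KeyStore keyStore = org.opensearch.test.KeyStoreUtils.createServerKeyStore();
        KeyManagerFactory kmf = KeyManagerFactory.getInstance("PKIX");
        kmf.init(keyStore, org.opensearch.test.KeyStoreUtils.KEYSTORE_PASSWORD);
        // A trust-all manager is acceptable for tests only, never for production
        return SslContextBuilder.forServer(kmf)
            .trustManager(InsecureTrustManagerFactory.INSTANCE)
            .build()
            .newEngine(UnpooledByteBufAllocator.DEFAULT);
    }
}
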
diff --git a/server/src/main/resources/org/opensearch/bootstrap/test.policy b/server/src/main/resources/org/opensearch/bootstrap/test.policy index e0a183b7eac88..9e1d5cebffc0e 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test.policy @@ -7,10 +7,29 @@ */ grant { - // allow to use JVM tooling (Java Compiler) in tests for annotation processing + // allow to use JVM tooling (Java Compiler) in tests for annotation processing permission java.io.FilePermission "${java.home}/lib/*", "read"; permission java.io.FilePermission "${java.home}/lib/modules/*", "read"; permission java.lang.RuntimePermission "accessSystemModules"; permission java.lang.RuntimePermission "accessDeclaredMembers"; permission java.lang.RuntimePermission "accessClassInPackage.*"; + + // security + permission java.io.FilePermission "${java.home}/lib/security/cacerts", "read"; + permission java.io.FilePermission "${java.home}/lib/security/jssecacerts", "read"; + permission java.lang.RuntimePermission "accessClassInPackage.sun.security.internal.spec"; + permission java.lang.RuntimePermission "closeClassLoader"; + permission java.lang.RuntimePermission "getProtectionDomain"; + permission java.security.SecurityPermission "getProperty.jdk.certpath.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.tls.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.tls.server.defaultDHEParameters"; + permission java.security.SecurityPermission "getProperty.keystore.type.compat"; + permission java.security.SecurityPermission "getProperty.org.bouncycastle.*"; + permission java.security.SecurityPermission "putProviderProperty.BC"; + permission java.security.SecurityPermission "removeProvider.SunJCE"; + permission java.security.SecurityPermission "removeProviderProperty.BC"; + permission java.util.PropertyPermission "java.runtime.name", "read"; + permission org.bouncycastle.crypto.CryptoServicesPermission "defaultRandomConfig"; + permission org.bouncycastle.crypto.CryptoServicesPermission "exportPrivateKey"; + permission org.bouncycastle.crypto.CryptoServicesPermission "exportSecretKey"; }; diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template index 22909ddf60013..69be28f4548c3 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template @@ -16,8 +16,8 @@ # under the License. [kdcdefaults] - kdc_listen = 88 - kdc_tcp_listen = 88 + kdc_ports = 88 + kdc_tcp_ports = 88 [realms] ${REALM_NAME} = { @@ -25,8 +25,7 @@ max_life = 12h 0m 0s max_renewable_life = 7d 0h 0m 0s master_key_type = aes256-cts - # remove aes256-cts:normal since unlimited strength policy needs installed for java to use it. 
- supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal + supported_enctypes = aes256-cts-hmac-sha1-96:normal aes128-cts-hmac-sha1-96:normal } [logging] diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template index 207fe939fb7a5..a87c5b50d5cf3 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template @@ -33,18 +33,15 @@ dns_canonicalize_hostname = false dns_lookup_kdc = false dns_lookup_realm = false - dns_uri_lookup = false forwardable = true ignore_acceptor_hostname = true rdns = false - default_tgs_enctypes = rc4-hmac - default_tkt_enctypes = rc4-hmac - permitted_enctypes = rc4-hmac + default_tgs_enctypes = aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 + default_tkt_enctypes = aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 + permitted_enctypes = aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 # udp_preference_limit = 1 - kdc_timeout = 3000 canonicalize = true - # See please https://seanjmullan.org/blog/2021/09/14/jdk17 (deprecate 3DES and RC4 in Kerberos) - allow_weak_crypto = true + allow_weak_crypto = false [realms] ${REALM_NAME} = { @@ -52,6 +49,8 @@ kdc = 127.0.0.1:${MAPPED_PORT} admin_server = ${KDC_NAME}:749 default_domain = ${BUILD_ZONE} + master_key_type = aes256-cts + supported_enctypes = aes256-cts-hmac-sha1-96:normal aes128-cts-hmac-sha1-96:normal } [domain_realm] diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 84a536fdf62c8..47addd36318a4 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -49,6 +49,9 @@ dependencies { api "org.mockito:mockito-core:${versions.mockito}" api "net.bytebuddy:byte-buddy:${versions.bytebuddy}" api "org.objenesis:objenesis:${versions.objenesis}" + api "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcutil-jdk18on:${versions.bouncycastle}" annotationProcessor "org.apache.logging.log4j:log4j-core:${versions.log4j}" } diff --git a/test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 b/test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..385a9d930eede --- /dev/null +++ b/test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +dd61bcdb87678451dd42d42e267979bd4b4451a1 \ No newline at end of file diff --git a/test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 b/test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..47fb5fd5e5f5d --- /dev/null +++ b/test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +619aafb92dc0b4c6cc4cf86c487ca48ee2d67a8e \ No newline at end of file diff --git a/test/framework/licenses/bouncycastle-LICENSE.txt b/test/framework/licenses/bouncycastle-LICENSE.txt new file mode 100644 index 0000000000000..5c7c14696849d --- /dev/null +++ b/test/framework/licenses/bouncycastle-LICENSE.txt @@ -0,0 +1,14 @@ +Copyright (c) 2000 - 2023 The Legion of the Bouncy Castle Inc. 
(https://www.bouncycastle.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/test/framework/licenses/bouncycastle-NOTICE.txt b/test/framework/licenses/bouncycastle-NOTICE.txt new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/test/framework/licenses/bouncycastle-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/test/framework/src/main/java/org/opensearch/test/KeyStoreUtils.java b/test/framework/src/main/java/org/opensearch/test/KeyStoreUtils.java new file mode 100644 index 0000000000000..c90b2b872f8ba --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/KeyStoreUtils.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; +import org.bouncycastle.cert.jcajce.JcaX509v1CertificateBuilder; +import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; + +import javax.security.auth.x500.X500Principal; +import javax.security.auth.x500.X500PrivateCredential; + +import java.math.BigInteger; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.KeyStore; +import java.security.cert.X509Certificate; +import java.util.Date; + +public class KeyStoreUtils { + + public static final char[] KEYSTORE_PASSWORD = "keystore_password".toCharArray(); + + public static KeyStore createServerKeyStore() throws Exception { + var serverCred = createCredential(); + var keyStore = KeyStore.getInstance("JKS"); + keyStore.load(null, null); + keyStore.setKeyEntry( + serverCred.getAlias(), + serverCred.getPrivateKey(), + KEYSTORE_PASSWORD, + new X509Certificate[] { serverCred.getCertificate() } + ); + return keyStore; + } + + private static X500PrivateCredential createCredential() throws Exception { + var keyPairGenerator = KeyPairGenerator.getInstance("RSA"); + keyPairGenerator.initialize(2048); + var keyPair = keyPairGenerator.generateKeyPair(); + var rootCert = new JcaX509CertificateConverter().getCertificate(generateCert(keyPair)); + return new X500PrivateCredential(rootCert, keyPair.getPrivate(), "server-ca"); + } + + private static X509CertificateHolder generateCert(KeyPair pair) throws Exception { + var baseTime = System.currentTimeMillis(); + // 10 years in milliseconds + var validityPeriod = 10L * 365 * 24 * 60 * 60 * 1000; + + var certBuilder = new JcaX509v1CertificateBuilder( + new X500Principal("CN=Test CA Certificate"), + BigInteger.valueOf(1), + new Date(baseTime), + new Date(baseTime + validityPeriod), + new X500Principal("CN=Test CA Certificate"), + pair.getPublic() + ); + var signer = new JcaContentSignerBuilder("SHA256withRSA").build(pair.getPrivate()); + return certBuilder.build(signer); + } + +} From ee7fbbd226b2be81128eaafe19aad0a39244368c Mon Sep 17 00:00:00 2001 From: Vinay Krishna Pudyodu Date: Thu, 27 Feb 2025 09:42:10 -0800 Subject: [PATCH 034/550] Implemented computation of segment replication stats at shard level (#17055) * Implemented computation of segment replication stats at shard level The method implemented here computes the segment replication stats at the shard level, instead of relying on the primary shard to compute stats based on reports from its replicas. 
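In outline, the change inverts the direction of stats reporting: the replicator keeps per-shard checkpoint statistics and exposes them through a provider function that each replica shard consults, so primaries no longer aggregate reports from their replicas. A minimal sketch of that wiring follows (simplified types for illustration only; the actual code in the diff below uses ShardId, ReplicationStats, SegmentReplicator and IndexShard):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class ShardStatsWiringSketch {
    record Stats(long maxBytesBehind, long totalBytesBehind, long maxReplicationLagMillis) {}

    // per-shard stats maintained by the replicator as checkpoints arrive (String ids for brevity)
    private final Map<String, Stats> statsByShard = new ConcurrentHashMap<>();

    // analogous to SegmentReplicator#getSegmentReplicationStats, handed to shards as a Function
    Function<String, Stats> statsProvider() {
        return shardId -> statsByShard.getOrDefault(shardId, new Stats(0, 0, 0));
    }

    // analogous to IndexShard#getReplicationStats after this change:
    // replicas ask the provider, primaries report empty stats
    Stats replicationStats(String shardId, boolean primary) {
        return primary ? new Stats(0, 0, 0) : statsProvider().apply(shardId);
    }
}
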
Signed-off-by: Vinay Krishna Pudyodu * Updated style checks in the test Signed-off-by: Vinay Krishna Pudyodu * Updated changelog Signed-off-by: Vinay Krishna Pudyodu * fixed style issues Signed-off-by: Vinay Krishna Pudyodu * Fix the failing integration test Signed-off-by: Vinay Krishna Pudyodu * Fix stylecheck Signed-off-by: Vinay Krishna Pudyodu * Fixed the comments for the initial revision Signed-off-by: Vinay Krishna Pudyodu * Updated to use System.nanoTime() for lag calculation Signed-off-by: Vinay Krishna Pudyodu * Fixed the integration test for node stats Signed-off-by: Vinay Krishna Pudyodu * Modified the version in the ReplicationCheckpoint for backward compatibility Signed-off-by: Vinay Krishna Pudyodu * Added precomputation logic for the stats calculation Signed-off-by: Vinay Krishna Pudyodu * Removed unwanted lines Signed-off-by: Vinay Krishna Pudyodu * Clean up the maps when index closed Signed-off-by: Vinay Krishna Pudyodu * Added a null check for the indexshard checkpoint Signed-off-by: Vinay Krishna Pudyodu * fix style checks Signed-off-by: Vinay Krishna Pudyodu * Updated version and added bwc for RemoteSegmentMetadata Signed-off-by: Vinay Krishna Pudyodu * Upated the javadoc comments Signed-off-by: Vinay Krishna Pudyodu * Address comments PR Signed-off-by: Vinay Krishna Pudyodu * Removed the latestReceivedCheckpoint map from SegmentReplicationTargetService Signed-off-by: Vinay Krishna Pudyodu * Added granular locks for the concurrency of stats methods Signed-off-by: Vinay Krishna Pudyodu * Style check fixes Signed-off-by: Vinay Krishna Pudyodu * Changes to maintain atomicity Signed-off-by: Vinay Krishna Pudyodu * spotlessApply Signed-off-by: Vinay Krishna Pudyodu * removed querying the remotestore when replication is in progress Signed-off-by: Vinay Krishna Pudyodu * spotlessApply Signed-off-by: Vinay Krishna Pudyodu --------- Signed-off-by: Vinay Krishna Pudyodu --- CHANGELOG.md | 1 + .../opensearch/index/shard/IndexShardIT.java | 5 +- .../SegmentReplicationStatsIT.java | 16 +- .../io/IndexIOStreamHandlerFactory.java | 25 +++ .../io/VersionedCodecStreamWrapper.java | 23 ++- .../org/opensearch/index/IndexModule.java | 10 +- .../org/opensearch/index/IndexService.java | 11 +- .../opensearch/index/ReplicationStats.java | 4 + .../opensearch/index/shard/IndexShard.java | 20 +- .../store/RemoteSegmentStoreDirectory.java | 5 +- .../metadata/RemoteSegmentMetadata.java | 30 ++- .../RemoteSegmentMetadataHandler.java | 9 +- .../RemoteSegmentMetadataHandlerFactory.java | 44 +++++ .../transfer/TranslogTransferManager.java | 3 +- ...ranslogTransferMetadataHandlerFactory.java | 37 ++++ .../opensearch/indices/IndicesService.java | 10 +- .../RemoteStoreReplicationSource.java | 1 - .../replication/SegmentReplicationTarget.java | 5 +- .../SegmentReplicationTargetService.java | 20 +- .../replication/SegmentReplicator.java | 149 ++++++++++++++- .../checkpoint/ReplicationCheckpoint.java | 39 +++- .../main/java/org/opensearch/node/Node.java | 3 +- .../io/VersionedCodecStreamWrapperTests.java | 11 +- .../opensearch/index/IndexModuleTests.java | 3 +- .../index/seqno/ReplicationTrackerTests.java | 15 +- .../RemoteSegmentStoreDirectoryTests.java | 9 +- ...oteSegmentMetadataHandlerFactoryTests.java | 44 +++++ .../RemoteSegmentMetadataHandlerTests.java | 2 +- ...ogTransferMetadataHandlerFactoryTests.java | 42 +++++ .../SegmentReplicationTargetServiceTests.java | 12 +- .../SegmentReplicationTargetTests.java | 15 +- .../replication/SegmentReplicatorTests.java | 174 +++++++++++++++++- 
.../replication/common/CopyStateTests.java | 3 +- .../index/shard/IndexShardTestCase.java | 7 +- 34 files changed, 706 insertions(+), 101 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/io/IndexIOStreamHandlerFactory.java create mode 100644 server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerFactory.java create mode 100644 server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactory.java create mode 100644 server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactoryTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index e4779231977b9..6aa18ce0064ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) - Added ability to retrieve value from DocValues in a flat_object filed([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) - Improve performace of NumericTermAggregation by avoiding unnecessary sorting([#17252](https://github.com/opensearch-project/OpenSearch/pull/17252)) +- Implemented computation of segment replication stats at shard level ([#17055](https://github.com/opensearch-project/OpenSearch/pull/17055)) - [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) ### Dependencies diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 7fd219a3dd9dc..2d0918ff6e89a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -114,6 +114,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Stream; @@ -136,6 +137,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength; +import static org.mockito.Mockito.mock; public class IndexShardIT extends OpenSearchSingleNodeTestCase { @@ -716,7 +718,8 @@ public static final IndexShard newIndexShard( null, DefaultRemoteStoreSettings.INSTANCE, false, - IndexShardTestUtils.getFakeDiscoveryNodes(initializingShardRouting) + IndexShardTestUtils.getFakeDiscoveryNodes(initializingShardRouting), + mock(Function.class) ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java index 89aef6f0be1a6..5d69799e32647 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java @@ -404,19 +404,17 @@ public void testSegmentReplicationNodeAndIndexStats() throws 
Exception { for (NodeStats nodeStats : nodesStatsResponse.getNodes()) { ReplicationStats replicationStats = nodeStats.getIndices().getSegments().getReplicationStats(); - // primary node - should hold replication statistics + // primary node - do not have any replication statistics if (nodeStats.getNode().getName().equals(primaryNode)) { + assertTrue(replicationStats.getMaxBytesBehind() == 0); + assertTrue(replicationStats.getTotalBytesBehind() == 0); + assertTrue(replicationStats.getMaxReplicationLag() == 0); + } + // replica nodes - should hold replication statistics + if (nodeStats.getNode().getName().equals(replicaNode1) || nodeStats.getNode().getName().equals(replicaNode2)) { assertTrue(replicationStats.getMaxBytesBehind() > 0); assertTrue(replicationStats.getTotalBytesBehind() > 0); assertTrue(replicationStats.getMaxReplicationLag() > 0); - // 2 replicas so total bytes should be double of max - assertEquals(replicationStats.getMaxBytesBehind() * 2, replicationStats.getTotalBytesBehind()); - } - // replica nodes - should hold empty replication statistics - if (nodeStats.getNode().getName().equals(replicaNode1) || nodeStats.getNode().getName().equals(replicaNode2)) { - assertEquals(0, replicationStats.getMaxBytesBehind()); - assertEquals(0, replicationStats.getTotalBytesBehind()); - assertEquals(0, replicationStats.getMaxReplicationLag()); } } // get replication statistics at index level diff --git a/server/src/main/java/org/opensearch/common/io/IndexIOStreamHandlerFactory.java b/server/src/main/java/org/opensearch/common/io/IndexIOStreamHandlerFactory.java new file mode 100644 index 0000000000000..a4ad161d0ced3 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/IndexIOStreamHandlerFactory.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.common.io;
+
+/**
+ * Interface for factory to provide handler implementation for type {@link T}
+ * @param <T> The type of content to be read/written to stream
+ *
+ * @opensearch.internal
+ */
+public interface IndexIOStreamHandlerFactory<T> {
+
+    /**
+     * Implements logic to provide handler based on the stream versions
+     * @param version stream version
+     * @return Handler for reading/writing content streams to/from - {@link T}
+     */
+    IndexIOStreamHandler<T> getHandler(int version);
+}
diff --git a/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java b/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java
index 8089d354a2480..b62ae1f1d3956 100644
--- a/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java
+++ b/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java
@@ -28,18 +28,25 @@
 public class VersionedCodecStreamWrapper<T> {
     private static final Logger logger = LogManager.getLogger(VersionedCodecStreamWrapper.class);
 
-    // TODO This can be updated to hold a streamReadWriteHandlerFactory and get relevant handler based on the stream versions
-    private final IndexIOStreamHandler<T> indexIOStreamHandler;
+    private final IndexIOStreamHandlerFactory<T> indexIOStreamHandlerFactory;
+    private final int minVersion;
     private final int currentVersion;
     private final String codec;
 
     /**
-     * @param indexIOStreamHandler handler to read/write stream from T
+     * @param indexIOStreamHandlerFactory factory for providing handler to read/write stream from T
+     * @param minVersion earliest supported version of the stream
      * @param currentVersion latest supported version of the stream
      * @param codec: stream codec
      */
-    public VersionedCodecStreamWrapper(IndexIOStreamHandler<T> indexIOStreamHandler, int currentVersion, String codec) {
-        this.indexIOStreamHandler = indexIOStreamHandler;
+    public VersionedCodecStreamWrapper(
+        IndexIOStreamHandlerFactory<T> indexIOStreamHandlerFactory,
+        int minVersion,
+        int currentVersion,
+        String codec
+    ) {
+        this.indexIOStreamHandlerFactory = indexIOStreamHandlerFactory;
+        this.minVersion = minVersion;
         this.currentVersion = currentVersion;
         this.codec = codec;
     }
@@ -87,7 +94,7 @@ public void writeStream(IndexOutput indexOutput, T content) throws IOException {
      */
     private int checkHeader(IndexInput indexInput) throws IOException {
         // TODO Once versioning strategy is decided we'll add support for min/max supported versions
-        return CodecUtil.checkHeader(indexInput, this.codec, this.currentVersion, this.currentVersion);
+        return CodecUtil.checkHeader(indexInput, this.codec, minVersion, this.currentVersion);
     }
 
     /**
@@ -120,8 +127,6 @@ private void writeFooter(IndexOutput indexOutput) throws IOException {
      * @param version stream content version
      */
     private IndexIOStreamHandler<T> getHandlerForVersion(int version) {
-        // TODO implement factory and pick relevant handler based on version.
- // It should also take into account min and max supported versions - return this.indexIOStreamHandler; + return this.indexIOStreamHandlerFactory.getHandler(version); } } diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 52dd92f31d70b..7016ddb8e59b8 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -57,6 +57,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; @@ -652,7 +653,8 @@ public IndexService newIndexService( clusterDefaultRefreshIntervalSupplier, recoverySettings, remoteStoreSettings, - (s) -> {} + (s) -> {}, + shardId -> ReplicationStats.empty() ); } @@ -678,7 +680,8 @@ public IndexService newIndexService( Supplier clusterDefaultRefreshIntervalSupplier, RecoverySettings recoverySettings, RemoteStoreSettings remoteStoreSettings, - Consumer replicator + Consumer replicator, + Function segmentReplicationStatsProvider ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper @@ -740,7 +743,8 @@ public IndexService newIndexService( remoteStoreSettings, fileCache, compositeIndexSettings, - replicator + replicator, + segmentReplicationStatsProvider ); success = true; return indexService; diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 72d723c7e1199..e265ce3590121 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -197,6 +197,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final FileCache fileCache; private final CompositeIndexSettings compositeIndexSettings; private final Consumer replicator; + private final Function segmentReplicationStatsProvider; public IndexService( IndexSettings indexSettings, @@ -235,7 +236,8 @@ public IndexService( RemoteStoreSettings remoteStoreSettings, FileCache fileCache, CompositeIndexSettings compositeIndexSettings, - Consumer replicator + Consumer replicator, + Function segmentReplicationStatsProvider ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -322,6 +324,7 @@ public IndexService( this.compositeIndexSettings = compositeIndexSettings; this.fileCache = fileCache; this.replicator = replicator; + this.segmentReplicationStatsProvider = segmentReplicationStatsProvider; updateFsyncTaskIfNecessary(); } @@ -398,7 +401,8 @@ public IndexService( remoteStoreSettings, null, null, - s -> {} + s -> {}, + (shardId) -> ReplicationStats.empty() ); } @@ -694,7 +698,8 @@ protected void closeInternal() { recoverySettings, remoteStoreSettings, seedRemote, - discoveryNodes + discoveryNodes, + segmentReplicationStatsProvider ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/ReplicationStats.java b/server/src/main/java/org/opensearch/index/ReplicationStats.java index 8987a492e9a90..22628e86d309f 100644 --- 
a/server/src/main/java/org/opensearch/index/ReplicationStats.java +++ b/server/src/main/java/org/opensearch/index/ReplicationStats.java @@ -42,6 +42,10 @@ public ReplicationStats(StreamInput in) throws IOException { this.maxReplicationLag = in.readVLong(); } + public static ReplicationStats empty() { + return new ReplicationStats(); + } + public ReplicationStats() { } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index df841dac4cf8e..f8ad3fc8cf866 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -361,6 +361,7 @@ Runnable getGlobalCheckpointSyncer() { */ private final ShardMigrationState shardMigrationState; private DiscoveryNodes discoveryNodes; + private final Function segmentReplicationStatsProvider; public IndexShard( final ShardRouting shardRouting, @@ -391,7 +392,8 @@ public IndexShard( final RecoverySettings recoverySettings, final RemoteStoreSettings remoteStoreSettings, boolean seedRemote, - final DiscoveryNodes discoveryNodes + final DiscoveryNodes discoveryNodes, + final Function segmentReplicationStatsProvider ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -493,6 +495,7 @@ public boolean shouldCache(Query query) { this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote); this.discoveryNodes = discoveryNodes; + this.segmentReplicationStatsProvider = segmentReplicationStatsProvider; } public ThreadPool getThreadPool() { @@ -3233,17 +3236,10 @@ public Set getReplicationStatsForTrackedReplicas() } public ReplicationStats getReplicationStats() { - if (indexSettings.isSegRepEnabledOrRemoteNode() && routingEntry().primary()) { - final Set stats = getReplicationStatsForTrackedReplicas(); - long maxBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).max().orElse(0L); - long totalBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).sum(); - long maxReplicationLag = stats.stream() - .mapToLong(SegmentReplicationShardStats::getCurrentReplicationLagMillis) - .max() - .orElse(0L); - return new ReplicationStats(maxBytesBehind, totalBytesBehind, maxReplicationLag); - } - return new ReplicationStats(); + if (indexSettings.isSegRepEnabledOrRemoteNode() && !routingEntry().primary()) { + return segmentReplicationStatsProvider.apply(shardId); + } + return ReplicationStats.empty(); } /** diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 46a90da2a18b6..c18902b69d23c 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -38,7 +38,7 @@ import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; -import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandlerFactory; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; 
import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.threadpool.ThreadPool; @@ -104,7 +104,8 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private Map segmentsUploadedToRemoteStore; private static final VersionedCodecStreamWrapper metadataStreamWrapper = new VersionedCodecStreamWrapper<>( - new RemoteSegmentMetadataHandler(), + new RemoteSegmentMetadataHandlerFactory(), + RemoteSegmentMetadata.VERSION_ONE, RemoteSegmentMetadata.CURRENT_VERSION, RemoteSegmentMetadata.METADATA_CODEC ); diff --git a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java index 41a145273e8ef..463e08918b3f7 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java +++ b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java @@ -30,10 +30,15 @@ */ @PublicApi(since = "2.6.0") public class RemoteSegmentMetadata { + + public static final int VERSION_ONE = 1; + + public static final int VERSION_TWO = 2; + /** * Latest supported version of metadata */ - public static final int CURRENT_VERSION = 1; + public static final int CURRENT_VERSION = VERSION_TWO; /** * Metadata codec */ @@ -106,6 +111,11 @@ public static Map f ); } + /** + * Write always writes with the latest version of the RemoteSegmentMetadata + * @param out file output stream which will store stream content + * @throws IOException in case there is a problem writing the file + */ public void write(IndexOutput out) throws IOException { out.writeMapOfStrings(toMapOfStrings()); writeCheckpointToIndexOutput(replicationCheckpoint, out); @@ -113,11 +123,18 @@ public void write(IndexOutput out) throws IOException { out.writeBytes(segmentInfosBytes, segmentInfosBytes.length); } - public static RemoteSegmentMetadata read(IndexInput indexInput) throws IOException { + /** + * Read can happen in the upgraded version of replica which needs to support all versions of RemoteSegmentMetadata + * @param indexInput file input stream + * @param version version of the RemoteSegmentMetadata + * @return {@code RemoteSegmentMetadata} + * @throws IOException in case there is a problem reading from the file input stream + */ + public static RemoteSegmentMetadata read(IndexInput indexInput, int version) throws IOException { Map metadata = indexInput.readMapOfStrings(); final Map uploadedSegmentMetadataMap = RemoteSegmentMetadata .fromMapOfStrings(metadata); - ReplicationCheckpoint replicationCheckpoint = readCheckpointFromIndexInput(indexInput, uploadedSegmentMetadataMap); + ReplicationCheckpoint replicationCheckpoint = readCheckpointFromIndexInput(indexInput, uploadedSegmentMetadataMap, version); int byteArraySize = (int) indexInput.readLong(); byte[] segmentInfosBytes = new byte[byteArraySize]; indexInput.readBytes(segmentInfosBytes, 0, byteArraySize); @@ -136,11 +153,13 @@ public static void writeCheckpointToIndexOutput(ReplicationCheckpoint replicatio out.writeLong(replicationCheckpoint.getSegmentInfosVersion()); out.writeLong(replicationCheckpoint.getLength()); out.writeString(replicationCheckpoint.getCodec()); + out.writeLong(replicationCheckpoint.getCreatedTimeStamp()); } private static ReplicationCheckpoint readCheckpointFromIndexInput( IndexInput in, - Map uploadedSegmentMetadataMap + Map uploadedSegmentMetadataMap, + int version ) throws IOException { return new ReplicationCheckpoint( new 
ShardId(new Index(in.readString(), in.readString()), in.readVInt()), @@ -149,7 +168,8 @@ private static ReplicationCheckpoint readCheckpointFromIndexInput( in.readLong(), in.readLong(), in.readString(), - toStoreFileMetadata(uploadedSegmentMetadataMap) + toStoreFileMetadata(uploadedSegmentMetadataMap), + version >= VERSION_TWO ? in.readLong() : 0 ); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java index 3077d8c76ddae..9fa76b38d2b07 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java +++ b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java @@ -20,6 +20,13 @@ * @opensearch.internal */ public class RemoteSegmentMetadataHandler implements IndexIOStreamHandler { + + private final int version; + + public RemoteSegmentMetadataHandler(int version) { + this.version = version; + } + /** * Reads metadata content from metadata file input stream and parsed into {@link RemoteSegmentMetadata} * @param indexInput metadata file input stream with {@link IndexInput#getFilePointer()} pointing to metadata content @@ -27,7 +34,7 @@ public class RemoteSegmentMetadataHandler implements IndexIOStreamHandler { + private final AtomicReference> handlerRef = new AtomicReference<>(); + + @Override + public IndexIOStreamHandler getHandler(int version) { + IndexIOStreamHandler current = handlerRef.get(); + if (current != null) { + return current; + } + + IndexIOStreamHandler newHandler = createHandler(version); + handlerRef.compareAndSet(null, newHandler); + return handlerRef.get(); + } + + private IndexIOStreamHandler createHandler(int version) { + return switch (version) { + case RemoteSegmentMetadata.VERSION_ONE -> new RemoteSegmentMetadataHandler(RemoteSegmentMetadata.VERSION_ONE); + case RemoteSegmentMetadata.VERSION_TWO -> new RemoteSegmentMetadataHandler(RemoteSegmentMetadata.VERSION_TWO); + default -> throw new IllegalArgumentException("Unsupported RemoteSegmentMetadata version: " + version); + }; + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 1e621d6cb7688..d410f473c71f1 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -75,7 +75,8 @@ public class TranslogTransferManager { private final Logger logger; private static final VersionedCodecStreamWrapper metadataStreamWrapper = new VersionedCodecStreamWrapper<>( - new TranslogTransferMetadataHandler(), + new TranslogTransferMetadataHandlerFactory(), + TranslogTransferMetadata.CURRENT_VERSION, TranslogTransferMetadata.CURRENT_VERSION, TranslogTransferMetadata.METADATA_CODEC ); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactory.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactory.java new file mode 100644 index 0000000000000..8f8e3e816d665 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactory.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * 
this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer; + +import org.opensearch.common.io.IndexIOStreamHandler; +import org.opensearch.common.io.IndexIOStreamHandlerFactory; + +import java.util.concurrent.ConcurrentHashMap; + +/** + * {@link TranslogTransferMetadataHandlerFactory} is a factory class to create {@link TranslogTransferMetadataHandler} + * instances based on the {@link TranslogTransferMetadata} version + * + * @opensearch.internal + */ +public class TranslogTransferMetadataHandlerFactory implements IndexIOStreamHandlerFactory { + + private final ConcurrentHashMap> handlers = new ConcurrentHashMap<>(); + + @Override + public IndexIOStreamHandler getHandler(int version) { + return handlers.computeIfAbsent(version, this::createHandler); + } + + private IndexIOStreamHandler createHandler(int version) { + return switch (version) { + case TranslogTransferMetadata.CURRENT_VERSION -> new TranslogTransferMetadataHandler(); + default -> throw new IllegalArgumentException("Unsupported TranslogTransferMetadata version: " + version); + }; + } +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index d679240955a07..527c2c23ba6b1 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -105,6 +105,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.IngestionConsumerFactory; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.cache.request.ShardRequestCache; import org.opensearch.index.compositeindex.CompositeIndexSettings; @@ -365,6 +366,7 @@ public class IndicesService extends AbstractLifecycleComponent private final FileCache fileCache; private final CompositeIndexSettings compositeIndexSettings; private final Consumer replicator; + private final Function segmentReplicationStatsProvider; private volatile int maxSizeInRequestCache; @Override @@ -404,7 +406,8 @@ public IndicesService( RemoteStoreSettings remoteStoreSettings, FileCache fileCache, CompositeIndexSettings compositeIndexSettings, - Consumer replicator + Consumer replicator, + Function segmentReplicationStatsProvider ) { this.settings = settings; this.threadPool = threadPool; @@ -515,6 +518,7 @@ protected void closeInternal() { this.compositeIndexSettings = compositeIndexSettings; this.fileCache = fileCache; this.replicator = replicator; + this.segmentReplicationStatsProvider = segmentReplicationStatsProvider; this.maxSizeInRequestCache = INDICES_REQUEST_CACHE_MAX_SIZE_ALLOWED_IN_CACHE_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(INDICES_REQUEST_CACHE_MAX_SIZE_ALLOWED_IN_CACHE_SETTING, this::setMaxSizeInRequestCache); @@ -581,6 +585,7 @@ public IndicesService( remoteStoreSettings, null, null, + null, null ); } @@ -998,7 +1003,8 @@ private synchronized IndexService createIndexService( this::getClusterDefaultRefreshInterval, this.recoverySettings, this.remoteStoreSettings, - replicator + replicator, + segmentReplicationStatsProvider ); } diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index 
b06b3e0497cf7..30d9c362b6269 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -110,7 +110,6 @@ public void getSegmentFiles( return; } logger.debug("Downloading segment files from remote store {}", filesToFetch); - if (remoteMetadataExists()) { final Directory storeDirectory = indexShard.store().directory(); final Collection directoryFiles = List.of(storeDirectory.listAll()); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 7131b49a41834..64bd73ebb4611 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -39,6 +39,7 @@ import java.util.List; import java.util.Locale; import java.util.Set; +import java.util.function.BiConsumer; import java.util.stream.Collectors; /** @@ -161,7 +162,7 @@ public void writeFileChunk( * * @param listener {@link ActionListener} listener. */ - public void startReplication(ActionListener listener) { + public void startReplication(ActionListener listener, BiConsumer checkpointUpdater) { cancellableThreads.setOnCancel((reason, beforeCancelEx) -> { throw new CancellableThreads.ExecutionCancelledException("replication was canceled reason [" + reason + "]"); }); @@ -177,6 +178,8 @@ public void startReplication(ActionListener listener) { source.getCheckpointMetadata(getId(), checkpoint, checkpointInfoListener); checkpointInfoListener.whenComplete(checkpointInfo -> { + checkpointUpdater.accept(checkpointInfo.getCheckpoint(), this.indexShard); + final List filesToFetch = getFiles(checkpointInfo); state.setStage(SegmentReplicationState.Stage.GET_FILES); cancellableThreads.checkForCancel(); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 8fee3f671ecc9..d57f35a5079fc 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -22,7 +22,6 @@ import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; -import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.transport.TransportResponse; @@ -49,7 +48,6 @@ import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; @@ -58,7 +56,6 @@ /** * Service class that handles incoming checkpoints to initiate replication events on replicas. 
- * * @opensearch.internal */ public class SegmentReplicationTargetService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { @@ -70,8 +67,6 @@ public class SegmentReplicationTargetService extends AbstractLifecycleComponent private final SegmentReplicationSourceFactory sourceFactory; - protected final Map latestReceivedCheckpoint = ConcurrentCollections.newConcurrentMap(); - private final IndicesService indicesService; private final ClusterService clusterService; private final TransportService transportService; @@ -216,7 +211,6 @@ public void clusterChanged(ClusterChangedEvent event) { public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null && indexShard.indexSettings().isSegRepEnabledOrRemoteNode()) { replicator.cancel(indexShard.shardId(), "Shard closing"); - latestReceivedCheckpoint.remove(shardId); } } @@ -227,6 +221,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh @Override public void afterIndexShardStarted(IndexShard indexShard) { if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() && indexShard.routingEntry().primary() == false) { + replicator.initializeStats(indexShard.shardId()); processLatestReceivedCheckpoint(indexShard, Thread.currentThread()); } } @@ -241,7 +236,6 @@ public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting ol && oldRouting.primary() == false && newRouting.primary()) { replicator.cancel(indexShard.shardId(), "Shard has been promoted to primary"); - latestReceivedCheckpoint.remove(indexShard.shardId()); } } @@ -468,7 +462,7 @@ private DiscoveryNode getPrimaryNode(ShardRouting primaryShard) { // visible to tests protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Thread thread) { - final ReplicationCheckpoint latestPublishedCheckpoint = latestReceivedCheckpoint.get(replicaShard.shardId()); + final ReplicationCheckpoint latestPublishedCheckpoint = replicator.getPrimaryCheckpoint(replicaShard.shardId()); if (latestPublishedCheckpoint != null) { logger.trace( () -> new ParameterizedMessage( @@ -481,7 +475,7 @@ protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Threa // if we retry ensure the shard is not in the process of being closed. // it will be removed from indexService's collection before the shard is actually marked as closed. if (indicesService.getShardOrNull(replicaShard.shardId()) != null) { - onNewCheckpoint(latestReceivedCheckpoint.get(replicaShard.shardId()), replicaShard); + onNewCheckpoint(replicator.getPrimaryCheckpoint(replicaShard.shardId()), replicaShard); } }; // Checks if we are using same thread and forks if necessary. 
@@ -497,13 +491,7 @@ protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Threa // visible to tests protected void updateLatestReceivedCheckpoint(ReplicationCheckpoint receivedCheckpoint, IndexShard replicaShard) { - if (latestReceivedCheckpoint.get(replicaShard.shardId()) != null) { - if (receivedCheckpoint.isAheadOf(latestReceivedCheckpoint.get(replicaShard.shardId()))) { - latestReceivedCheckpoint.replace(replicaShard.shardId(), receivedCheckpoint); - } - } else { - latestReceivedCheckpoint.put(replicaShard.shardId(), receivedCheckpoint); - } + replicator.updateReplicationCheckpointStats(receivedCheckpoint, replicaShard); } /** diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java index ad3bc1933208c..b8a5774c21c1f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java @@ -19,8 +19,10 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.indices.replication.common.ReplicationFailedException; @@ -29,6 +31,10 @@ import java.io.IOException; import java.util.Map; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.TimeUnit; /** * This class is responsible for managing segment replication events on replicas. @@ -43,8 +49,11 @@ public class SegmentReplicator { private final ReplicationCollection onGoingReplications; private final Map completedReplications = ConcurrentCollections.newConcurrentMap(); - private final ThreadPool threadPool; + private final ConcurrentMap> replicationCheckpointStats = + ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap primaryCheckpoint = ConcurrentCollections.newConcurrentMap(); + private final ThreadPool threadPool; private final SetOnce sourceFactory; public SegmentReplicator(ThreadPool threadPool) { @@ -102,6 +111,135 @@ SegmentReplicationTarget startReplication( return target; } + /** + * Retrieves segment replication statistics for a specific shard. + * Its computed based on the last and first entry in the replicationCheckpointStats map. + * The Last entry gives the Bytes behind, and the difference in the first and last entry provides the lag. 
+ * + * @param shardId The shardId to get statistics for + * @return ReplicationStats containing bytes behind and replication lag information + */ + public ReplicationStats getSegmentReplicationStats(final ShardId shardId) { + final ConcurrentNavigableMap existingCheckpointStats = replicationCheckpointStats.get(shardId); + if (existingCheckpointStats == null || existingCheckpointStats.isEmpty()) { + return ReplicationStats.empty(); + } + + Map.Entry lowestEntry = existingCheckpointStats.firstEntry(); + Map.Entry highestEntry = existingCheckpointStats.lastEntry(); + + long bytesBehind = highestEntry.getValue().getBytesBehind(); + long replicationLag = bytesBehind > 0L + ? TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lowestEntry.getValue().getTimestamp()) + : 0; + + return new ReplicationStats(bytesBehind, bytesBehind, replicationLag); + } + + /** + * Updates the latest checkpoint of the primary for the replica shard and then + * calculates checkpoint statistics for the replica shard with the latest checkpoint information. + * This method maintains statistics about how far behind replica shards are from the primary. + * It calculates the bytes behind by comparing the latest-received and current checkpoint in the indexShard, + * and it maintains the bytes behind and timestamp for each segmentInfosVersion of latestCheckPoint. + *
+     * <pre>
+     * Example:
+     * {
+     *     [replica][0] : {
+     *                       7 : {bytesBehind=0, timestamp=1700220000000}
+     *                       8 : {bytesBehind=100, timestamp=1700330000000}
+     *                       9 : {bytesBehind=150, timestamp=1700440000000}
+     *                    }
+     * }
+     * </pre>
+ * @param latestReceivedCheckPoint The most recent checkpoint from the primary + * @param indexShard The index shard where its updated + */ + public void updateReplicationCheckpointStats(final ReplicationCheckpoint latestReceivedCheckPoint, final IndexShard indexShard) { + ReplicationCheckpoint primaryCheckPoint = this.primaryCheckpoint.get(indexShard.shardId()); + if (primaryCheckPoint == null || latestReceivedCheckPoint.isAheadOf(primaryCheckPoint)) { + this.primaryCheckpoint.put(indexShard.shardId(), latestReceivedCheckPoint); + calculateReplicationCheckpointStats(latestReceivedCheckPoint, indexShard); + } + } + + /** + * Removes checkpoint statistics for all checkpoints up to and including the last successful sync + * and recalculates the bytes behind value for the last replicationCheckpointStats entry. + * This helps maintain only relevant checkpoint information and clean up old data. + * + * @param indexShard The index shard to prune checkpoints for + */ + protected void pruneCheckpointsUpToLastSync(final IndexShard indexShard) { + ReplicationCheckpoint latestCheckpoint = this.primaryCheckpoint.get(indexShard.shardId()); + if (latestCheckpoint != null) { + ReplicationCheckpoint indexReplicationCheckPoint = indexShard.getLatestReplicationCheckpoint(); + long segmentInfoVersion = indexReplicationCheckPoint.getSegmentInfosVersion(); + final ConcurrentNavigableMap existingCheckpointStats = replicationCheckpointStats.get( + indexShard.shardId() + ); + + if (existingCheckpointStats != null && !existingCheckpointStats.isEmpty()) { + existingCheckpointStats.keySet().removeIf(key -> key < segmentInfoVersion); + Map.Entry lastEntry = existingCheckpointStats.lastEntry(); + if (lastEntry != null) { + lastEntry.getValue().setBytesBehind(calculateBytesBehind(latestCheckpoint, indexReplicationCheckPoint)); + } + } + } + } + + private void calculateReplicationCheckpointStats(final ReplicationCheckpoint latestReceivedCheckPoint, final IndexShard indexShard) { + ReplicationCheckpoint indexShardReplicationCheckpoint = indexShard.getLatestReplicationCheckpoint(); + if (indexShardReplicationCheckpoint != null) { + long segmentInfosVersion = latestReceivedCheckPoint.getSegmentInfosVersion(); + long bytesBehind = calculateBytesBehind(latestReceivedCheckPoint, indexShardReplicationCheckpoint); + if (bytesBehind > 0) { + ConcurrentNavigableMap existingCheckpointStats = replicationCheckpointStats.get( + indexShard.shardId() + ); + if (existingCheckpointStats != null) { + existingCheckpointStats.computeIfAbsent( + segmentInfosVersion, + k -> new ReplicationCheckpointStats(bytesBehind, latestReceivedCheckPoint.getCreatedTimeStamp()) + ); + } + } + } + } + + private long calculateBytesBehind(final ReplicationCheckpoint latestCheckPoint, final ReplicationCheckpoint replicationCheckpoint) { + Store.RecoveryDiff diff = Store.segmentReplicationDiff(latestCheckPoint.getMetadataMap(), replicationCheckpoint.getMetadataMap()); + + return diff.missing.stream().mapToLong(StoreFileMetadata::length).sum(); + } + + public void initializeStats(ShardId shardId) { + replicationCheckpointStats.computeIfAbsent(shardId, k -> new ConcurrentSkipListMap<>()); + } + + private static class ReplicationCheckpointStats { + private long bytesBehind; + private final long timestamp; + + public ReplicationCheckpointStats(long bytesBehind, long timestamp) { + this.bytesBehind = bytesBehind; + this.timestamp = timestamp; + } + + public long getBytesBehind() { + return bytesBehind; + } + + public void setBytesBehind(long bytesBehind) { + 
this.bytesBehind = bytesBehind; + } + + public long getTimestamp() { + return timestamp; + } + } + /** * Runnable implementation to trigger a replication event. */ @@ -138,6 +276,7 @@ private void start(final long replicationId) { @Override public void onResponse(Void o) { logger.debug(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); + pruneCheckpointsUpToLastSync(target.indexShard()); onGoingReplications.markAsDone(replicationId); if (target.state().getIndex().recoveredFileCount() != 0 && target.state().getIndex().recoveredBytes() != 0) { completedReplications.put(target.shardId(), target.state()); @@ -153,7 +292,7 @@ public void onFailure(Exception e) { } onGoingReplications.fail(replicationId, new ReplicationFailedException("Segment Replication failed", e), false); } - }); + }, this::updateReplicationCheckpointStats); } // pkg-private for integration tests @@ -197,12 +336,18 @@ int size() { void cancel(ShardId shardId, String reason) { onGoingReplications.cancelForShard(shardId, reason); + replicationCheckpointStats.remove(shardId); + primaryCheckpoint.remove(shardId); } SegmentReplicationTarget get(ShardId shardId) { return onGoingReplications.getOngoingReplicationTarget(shardId); } + ReplicationCheckpoint getPrimaryCheckpoint(ShardId shardId) { + return primaryCheckpoint.getOrDefault(shardId, ReplicationCheckpoint.empty(shardId)); + } + ReplicationCollection.ReplicationRef get(long id) { return onGoingReplications.get(id); } diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index 29410159a4955..8380187a288ba 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -38,6 +38,7 @@ public class ReplicationCheckpoint implements Writeable, Comparable metadataMap; + private final long createdTimeStamp; public static ReplicationCheckpoint empty(ShardId shardId) { return empty(shardId, ""); @@ -55,10 +56,11 @@ private ReplicationCheckpoint(ShardId shardId, String codec) { length = 0L; this.codec = codec; this.metadataMap = Collections.emptyMap(); + this.createdTimeStamp = System.nanoTime(); } public ReplicationCheckpoint(ShardId shardId, long primaryTerm, long segmentsGen, long segmentInfosVersion, String codec) { - this(shardId, primaryTerm, segmentsGen, segmentInfosVersion, 0L, codec, Collections.emptyMap()); + this(shardId, primaryTerm, segmentsGen, segmentInfosVersion, 0L, codec, Collections.emptyMap(), System.nanoTime()); } public ReplicationCheckpoint( @@ -77,6 +79,27 @@ public ReplicationCheckpoint( this.length = length; this.codec = codec; this.metadataMap = metadataMap; + this.createdTimeStamp = System.nanoTime(); + } + + public ReplicationCheckpoint( + ShardId shardId, + long primaryTerm, + long segmentsGen, + long segmentInfosVersion, + long length, + String codec, + Map metadataMap, + long createdTimeStamp + ) { + this.shardId = shardId; + this.primaryTerm = primaryTerm; + this.segmentsGen = segmentsGen; + this.segmentInfosVersion = segmentInfosVersion; + this.length = length; + this.codec = codec; + this.metadataMap = metadataMap; + this.createdTimeStamp = createdTimeStamp; } public ReplicationCheckpoint(StreamInput in) throws IOException { @@ -96,6 +119,11 @@ public ReplicationCheckpoint(StreamInput in) throws IOException { } 
else { this.metadataMap = Collections.emptyMap(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.createdTimeStamp = in.readLong(); + } else { + this.createdTimeStamp = 0; + } } /** @@ -159,6 +187,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeMap(metadataMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeLong(createdTimeStamp); + } } @Override @@ -197,6 +228,10 @@ public Map getMetadataMap() { return metadataMap; } + public long getCreatedTimeStamp() { + return createdTimeStamp; + } + @Override public String toString() { return "ReplicationCheckpoint{" @@ -212,6 +247,8 @@ public String toString() { + length + ", codec=" + codec + + ", timestamp=" + + createdTimeStamp + '}'; } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index e1e5e4a3b455e..222c6e8ba36c4 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -957,7 +957,8 @@ protected Node( remoteStoreSettings, fileCache, compositeIndexSettings, - segmentReplicator::startReplication + segmentReplicator::startReplication, + segmentReplicator::getSegmentReplicationStats ); final IngestService ingestService = new IngestService( diff --git a/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java b/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java index 938337fc5146e..a88df528bcb86 100644 --- a/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java +++ b/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java @@ -38,16 +38,19 @@ public class VersionedCodecStreamWrapperTests extends OpenSearchTestCase { private static final int VERSION = 1; IndexIOStreamHandler ioStreamHandler; + IndexIOStreamHandlerFactory ioStreamHandlerFactory; VersionedCodecStreamWrapper versionedCodecStreamWrapper; @Before public void setup() throws IOException { + ioStreamHandlerFactory = mock(IndexIOStreamHandlerFactory.class); ioStreamHandler = mock(IndexIOStreamHandler.class); - versionedCodecStreamWrapper = new VersionedCodecStreamWrapper(ioStreamHandler, VERSION, CODEC); + versionedCodecStreamWrapper = new VersionedCodecStreamWrapper(ioStreamHandlerFactory, VERSION, VERSION, CODEC); } public void testReadStream() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); DummyObject readData = versionedCodecStreamWrapper.readStream(createHeaderFooterBytes(CODEC, VERSION, true, true)); assertEquals(readData, expectedObject); @@ -55,6 +58,7 @@ public void testReadStream() throws IOException { public void testReadWithOldVersionThrowsException() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); assertThrows( IndexFormatTooOldException.class, @@ -64,6 +68,7 @@ public void testReadWithOldVersionThrowsException() throws IOException { public void testReadWithNewVersionThrowsException() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + 
when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); assertThrows( IndexFormatTooNewException.class, @@ -73,6 +78,7 @@ public void testReadWithNewVersionThrowsException() throws IOException { public void testReadWithUnexpectedCodecThrowsException() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); assertThrows( CorruptIndexException.class, @@ -82,6 +88,7 @@ public void testReadWithUnexpectedCodecThrowsException() throws IOException { public void testReadWithNoHeaderThrowsException() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); assertThrows( CorruptIndexException.class, @@ -91,6 +98,7 @@ public void testReadWithNoHeaderThrowsException() throws IOException { public void testReadWithNoFooterThrowsException() throws IOException { DummyObject expectedObject = new DummyObject("test read"); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); when(ioStreamHandler.readContent(any())).thenReturn(expectedObject); assertThrows( CorruptIndexException.class, @@ -102,6 +110,7 @@ public void testWriteStream() throws IOException { DummyObject expectedObject = new DummyObject("test read"); BytesStreamOutput output = new BytesStreamOutput(); OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("dummy bytes", "dummy stream", output, 4096); + when(ioStreamHandlerFactory.getHandler(VERSION)).thenReturn(ioStreamHandler); doAnswer(invocation -> { IndexOutput io = invocation.getArgument(0); io.writeString("test write"); diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index bd86d3d396987..90f2b0b21cc8a 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -265,7 +265,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, DefaultRecoverySettings.INSTANCE, DefaultRemoteStoreSettings.INSTANCE, - s -> {} + s -> {}, + null ); } diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 233a99cbe4a73..899e80965e4fd 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -1844,7 +1844,8 @@ public void testSegmentReplicationCheckpointTracking() { 1, 1L, Codec.getDefault().getName(), - Map.of("segment_1", segment_1) + Map.of("segment_1", segment_1), + 0L ); final ReplicationCheckpoint secondCheckpoint = new ReplicationCheckpoint( tracker.shardId(), @@ -1853,7 +1854,8 @@ public void testSegmentReplicationCheckpointTracking() { 2, 51L, Codec.getDefault().getName(), - Map.of("segment_1", segment_1, "segment_2", segment_2) + Map.of("segment_1", segment_1, "segment_2", segment_2), + 0L ); final ReplicationCheckpoint thirdCheckpoint = new ReplicationCheckpoint( tracker.shardId(), @@ -1862,7 +1864,8 @@ public void 
testSegmentReplicationCheckpointTracking() { 3, 151L, Codec.getDefault().getName(), - Map.of("segment_1", segment_1, "segment_2", segment_2, "segment_3", segment_3) + Map.of("segment_1", segment_1, "segment_2", segment_2, "segment_3", segment_3), + 0L ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); @@ -1974,7 +1977,8 @@ public void testSegmentReplicationCheckpointForRelocatingPrimary() { 1, 5L, Codec.getDefault().getName(), - Map.of("segment_1", segment_1) + Map.of("segment_1", segment_1), + 0L ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); tracker.startReplicationLagTimers(initialCheckpoint); @@ -2033,7 +2037,8 @@ public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() { 1, 1L, Codec.getDefault().getName(), - Collections.emptyMap() + Collections.emptyMap(), + 0L ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); tracker.startReplicationLagTimers(initialCheckpoint); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index df3df81361a12..d673eb49be581 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -37,7 +37,7 @@ import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; -import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandlerFactory; import org.opensearch.test.MockLogAppender; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.ThreadPool; @@ -696,7 +696,8 @@ public void testUploadMetadataNonEmpty() throws IOException { eq(IOContext.DEFAULT) ); VersionedCodecStreamWrapper streamWrapper = new VersionedCodecStreamWrapper<>( - new RemoteSegmentMetadataHandler(), + new RemoteSegmentMetadataHandlerFactory(), + RemoteSegmentMetadata.CURRENT_VERSION, RemoteSegmentMetadata.CURRENT_VERSION, RemoteSegmentMetadata.METADATA_CODEC ); @@ -840,7 +841,7 @@ public void testHeaderMaxVersionCorruptIndexException() throws IOException { BytesStreamOutput output = new BytesStreamOutput(); OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("segment metadata", "metadata output stream", output, 4096); - CodecUtil.writeHeader(indexOutput, RemoteSegmentMetadata.METADATA_CODEC, 2); + CodecUtil.writeHeader(indexOutput, RemoteSegmentMetadata.METADATA_CODEC, 3); indexOutput.writeMapOfStrings(metadata); CodecUtil.writeFooter(indexOutput); indexOutput.close(); @@ -1115,7 +1116,7 @@ public void testSegmentMetadataCurrentVersion() { If author doesn't want to support old metadata files. Then this can be ignored. 
After taking appropriate action, fix this test by setting the correct version here */ - assertEquals(RemoteSegmentMetadata.CURRENT_VERSION, 1); + assertEquals(RemoteSegmentMetadata.CURRENT_VERSION, 2); } private void indexDocs(int startDocId, int numberOfDocs) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerFactoryTests.java b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerFactoryTests.java new file mode 100644 index 0000000000000..6911b84c58e4d --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerFactoryTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store.remote.metadata; + +import org.opensearch.common.io.IndexIOStreamHandler; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +/** + * Unit tests for {@link org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandlerFactoryTests}. + */ +public class RemoteSegmentMetadataHandlerFactoryTests extends OpenSearchTestCase { + + private RemoteSegmentMetadataHandlerFactory segmentMetadataHandlerFactory; + + @Before + public void setup() { + segmentMetadataHandlerFactory = new RemoteSegmentMetadataHandlerFactory(); + } + + public void testGetHandlerReturnsBasedOnVersion() { + IndexIOStreamHandler versionOneHandler = segmentMetadataHandlerFactory.getHandler(1); + assertTrue(versionOneHandler instanceof RemoteSegmentMetadataHandler); + IndexIOStreamHandler versionTwoHandler = segmentMetadataHandlerFactory.getHandler(2); + assertTrue(versionTwoHandler instanceof RemoteSegmentMetadataHandler); + } + + public void testGetHandlerWhenCalledMultipleTimesReturnsCachedHandler() { + IndexIOStreamHandler versionTwoHandlerOne = segmentMetadataHandlerFactory.getHandler(2); + IndexIOStreamHandler versionTwoHandlerTwo = segmentMetadataHandlerFactory.getHandler(2); + assertEquals(versionTwoHandlerOne, versionTwoHandlerTwo); + } + + public void testGetHandlerWhenHandlerNotProvidedThrowsException() { + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> { segmentMetadataHandlerFactory.getHandler(3); }); + assertEquals("Unsupported RemoteSegmentMetadata version: 3", throwable.getMessage()); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java index f5d54dc790e76..0a668bba28c74 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java @@ -44,7 +44,7 @@ public class RemoteSegmentMetadataHandlerTests extends IndexShardTestCase { @Before public void setup() throws IOException { - remoteSegmentMetadataHandler = new RemoteSegmentMetadataHandler(); + remoteSegmentMetadataHandler = new RemoteSegmentMetadataHandler(2); Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactoryTests.java 
b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactoryTests.java new file mode 100644 index 0000000000000..767037160980e --- /dev/null +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerFactoryTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer; + +import org.opensearch.common.io.IndexIOStreamHandler; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +/** + * Unit tests for {@link org.opensearch.index.translog.transfer.TranslogTransferMetadataHandlerFactoryTests}. + */ +public class TranslogTransferMetadataHandlerFactoryTests extends OpenSearchTestCase { + + private TranslogTransferMetadataHandlerFactory translogTransferMetadataHandlerFactory; + + @Before + public void setup() { + translogTransferMetadataHandlerFactory = new TranslogTransferMetadataHandlerFactory(); + } + + public void testGetHandlerReturnsBasedOnVersion() { + IndexIOStreamHandler versionOneHandler = translogTransferMetadataHandlerFactory.getHandler(1); + assertTrue(versionOneHandler instanceof TranslogTransferMetadataHandler); + } + + public void testGetHandlerWhenCalledMultipleTimesReturnsCachedHandler() { + IndexIOStreamHandler versionTwoHandlerOne = translogTransferMetadataHandlerFactory.getHandler(1); + IndexIOStreamHandler versionTwoHandlerTwo = translogTransferMetadataHandlerFactory.getHandler(1); + assertEquals(versionTwoHandlerOne, versionTwoHandlerTwo); + } + + public void testGetHandlerWhenHandlerNotProvidedThrowsException() { + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> { translogTransferMetadataHandlerFactory.getHandler(2); }); + assertEquals("Unsupported TranslogTransferMetadata version: 2", throwable.getMessage()); + } +} diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 1faaa16ce5628..8a47b87b09f30 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -100,8 +100,6 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { private SegmentReplicationState state; private ReplicationCheckpoint initialCheckpoint; - private ClusterState clusterState; - private static final long TRANSPORT_TIMEOUT = 30000;// 30sec @Override @@ -140,13 +138,14 @@ public void setUp() throws Exception { indicesService = mock(IndicesService.class); ClusterService clusterService = mock(ClusterService.class); - clusterState = mock(ClusterState.class); + ClusterState clusterState = mock(ClusterState.class); RoutingTable mockRoutingTable = mock(RoutingTable.class); when(clusterService.state()).thenReturn(clusterState); when(clusterState.routingTable()).thenReturn(mockRoutingTable); when(mockRoutingTable.shardRoutingTable(any())).thenReturn(primaryShard.getReplicationGroup().getRoutingTable()); when(clusterState.nodes()).thenReturn(DiscoveryNodes.builder().add(localNode).build()); + sut = prepareForReplication(primaryShard, replicaShard, transportService, indicesService, clusterService); initialCheckpoint = 
primaryShard.getLatestReplicationCheckpoint(); aheadCheckpoint = new ReplicationCheckpoint( @@ -596,13 +595,6 @@ public void testShardRoutingChanged_DoesNothingForDocRepIndex() throws IOExcepti closeShards(shard); } - public void testUpdateLatestReceivedCheckpoint() { - final SegmentReplicationTargetService spy = spy(sut); - sut.updateLatestReceivedCheckpoint(checkpoint, replicaShard); - sut.updateLatestReceivedCheckpoint(aheadCheckpoint, replicaShard); - assertEquals(sut.latestReceivedCheckpoint.get(replicaShard.shardId()), aheadCheckpoint); - } - public void testForceSegmentSyncHandler() throws Exception { ForceSyncRequest forceSyncRequest = new ForceSyncRequest(1L, 1L, replicaShard.shardId()); when(indicesService.getShardOrNull(forceSyncRequest.getShardId())).thenReturn(replicaShard); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 8b4b3aff701b4..52cb39bebd2b7 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -177,6 +177,9 @@ public void onFailure(Exception e) { logger.error("Unexpected onFailure", e); Assert.fail(); } + }, (ReplicationCheckpoint checkpoint, IndexShard indexShard) -> { + assertEquals(repCheckpoint, checkpoint); + assertEquals(indexShard, spyIndexShard); }); } @@ -230,7 +233,7 @@ public void onFailure(Exception e) { assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } - }); + }, mock(BiConsumer.class)); } public void testFailureResponse_getSegmentFiles() { @@ -283,7 +286,7 @@ public void onFailure(Exception e) { assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } - }); + }, mock(BiConsumer.class)); } public void testFailure_finalizeReplication_NonCorruptionException() throws IOException { @@ -330,7 +333,7 @@ public void onFailure(Exception e) { assertEquals(exception, e.getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } - }); + }, mock(BiConsumer.class)); } public void testFailure_finalizeReplication_IndexFormatException() throws IOException { @@ -376,7 +379,7 @@ public void onFailure(Exception e) { assertEquals(exception, e.getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } - }); + }, mock(BiConsumer.class)); } public void testFailure_differentSegmentFiles() throws IOException { @@ -429,7 +432,7 @@ public void onFailure(Exception e) { assertTrue(e.getMessage().contains("has local copies of segments that differ from the primary")); segrepTarget.fail(new ReplicationFailedException(e), false); } - }); + }, mock(BiConsumer.class)); } /** @@ -483,7 +486,7 @@ public void onFailure(Exception e) { logger.error("Unexpected onFailure", e); Assert.fail(); } - }); + }, mock(BiConsumer.class)); } /** diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java index 81ea16c80dd79..38f1c59bd5b68 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java @@ -9,6 +9,8 @@ package org.opensearch.indices.replication; import org.apache.lucene.store.IOContext; +import 
org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.Version; import org.opensearch.OpenSearchCorruptionException; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -20,6 +22,8 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.TestReplicationSource; import org.opensearch.index.shard.IndexShard; @@ -35,9 +39,11 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; @@ -200,6 +206,173 @@ public void getSegmentFiles( closeShards(primary, replica); } + public void testGetSegmentReplicationStats_WhenNoReplication() { + SegmentReplicator segmentReplicator = new SegmentReplicator(threadPool); + ShardId shardId = new ShardId("index", "uuid", 0); + ReplicationStats replicationStats = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(0, replicationStats.maxReplicationLag); + assertEquals(0, replicationStats.totalBytesBehind); + assertEquals(0, replicationStats.maxBytesBehind); + } + + public void testGetSegmentReplicationStats_WhileOnGoingReplicationAndPrimaryRefreshedToNewCheckPoint() { + ShardId shardId = new ShardId("index", "uuid", 0); + ReplicationCheckpoint firstReplicationCheckpoint = ReplicationCheckpoint.empty(shardId); + + StoreFileMetadata storeFileMetadata1 = new StoreFileMetadata("test-1", 500, "1", Version.LATEST, new BytesRef(500)); + StoreFileMetadata storeFileMetadata2 = new StoreFileMetadata("test-2", 500, "1", Version.LATEST, new BytesRef(500)); + Map stringStoreFileMetadataMapOne = new HashMap<>(); + stringStoreFileMetadataMapOne.put("test-1", storeFileMetadata1); + stringStoreFileMetadataMapOne.put("test-2", storeFileMetadata2); + ReplicationCheckpoint secondReplicationCheckpoint = new ReplicationCheckpoint( + shardId, + 2, + 2, + 2, + 1000, + "", + stringStoreFileMetadataMapOne, + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + + IndexShard replicaShard = mock(IndexShard.class); + when(replicaShard.shardId()).thenReturn(shardId); + when(replicaShard.getLatestReplicationCheckpoint()).thenReturn(firstReplicationCheckpoint) + .thenReturn(firstReplicationCheckpoint) + .thenReturn(firstReplicationCheckpoint) + .thenReturn(secondReplicationCheckpoint); + + SegmentReplicator segmentReplicator = new SegmentReplicator(threadPool); + segmentReplicator.initializeStats(shardId); + segmentReplicator.updateReplicationCheckpointStats(firstReplicationCheckpoint, replicaShard); + segmentReplicator.updateReplicationCheckpointStats(secondReplicationCheckpoint, replicaShard); + + Map stringStoreFileMetadataMapTwo = new HashMap<>(); + StoreFileMetadata storeFileMetadata3 = new StoreFileMetadata("test-3", 200, "1", Version.LATEST, new BytesRef(200)); + stringStoreFileMetadataMapTwo.put("test-1", storeFileMetadata1); + stringStoreFileMetadataMapTwo.put("test-2", storeFileMetadata2); + stringStoreFileMetadataMapTwo.put("test-3", storeFileMetadata3); + ReplicationCheckpoint thirdReplicationCheckpoint = new ReplicationCheckpoint( + shardId, + 3, + 
3, + 3, + 200, + "", + stringStoreFileMetadataMapTwo, + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + + segmentReplicator.updateReplicationCheckpointStats(thirdReplicationCheckpoint, replicaShard); + + ReplicationStats replicationStatsFirst = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(1200, replicationStatsFirst.totalBytesBehind); + assertEquals(1200, replicationStatsFirst.maxBytesBehind); + assertTrue(replicationStatsFirst.maxReplicationLag > 0); + + segmentReplicator.pruneCheckpointsUpToLastSync(replicaShard); + + ReplicationStats replicationStatsSecond = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(200, replicationStatsSecond.totalBytesBehind); + assertEquals(200, replicationStatsSecond.maxBytesBehind); + assertTrue(replicationStatsSecond.maxReplicationLag > 0); + } + + public void testGetSegmentReplicationStats_WhenCheckPointReceivedOutOfOrder() { + ShardId shardId = new ShardId("index", "uuid", 0); + ReplicationCheckpoint firstReplicationCheckpoint = ReplicationCheckpoint.empty(shardId); + + StoreFileMetadata storeFileMetadata1 = new StoreFileMetadata("test-1", 500, "1", Version.LATEST, new BytesRef(500)); + StoreFileMetadata storeFileMetadata2 = new StoreFileMetadata("test-2", 500, "1", Version.LATEST, new BytesRef(500)); + Map stringStoreFileMetadataMapOne = new HashMap<>(); + stringStoreFileMetadataMapOne.put("test-1", storeFileMetadata1); + stringStoreFileMetadataMapOne.put("test-2", storeFileMetadata2); + ReplicationCheckpoint secondReplicationCheckpoint = new ReplicationCheckpoint( + shardId, + 2, + 2, + 2, + 1000, + "", + stringStoreFileMetadataMapOne, + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + + IndexShard replicaShard = mock(IndexShard.class); + when(replicaShard.shardId()).thenReturn(shardId); + when(replicaShard.getLatestReplicationCheckpoint()).thenReturn(firstReplicationCheckpoint) + .thenReturn(firstReplicationCheckpoint) + .thenReturn(firstReplicationCheckpoint); + + SegmentReplicator segmentReplicator = new SegmentReplicator(threadPool); + segmentReplicator.initializeStats(shardId); + segmentReplicator.updateReplicationCheckpointStats(firstReplicationCheckpoint, replicaShard); + + Map stringStoreFileMetadataMapTwo = new HashMap<>(); + StoreFileMetadata storeFileMetadata3 = new StoreFileMetadata("test-3", 200, "1", Version.LATEST, new BytesRef(200)); + stringStoreFileMetadataMapTwo.put("test-1", storeFileMetadata1); + stringStoreFileMetadataMapTwo.put("test-2", storeFileMetadata2); + stringStoreFileMetadataMapTwo.put("test-3", storeFileMetadata3); + ReplicationCheckpoint thirdReplicationCheckpoint = new ReplicationCheckpoint( + shardId, + 3, + 3, + 3, + 200, + "", + stringStoreFileMetadataMapTwo, + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + + segmentReplicator.updateReplicationCheckpointStats(thirdReplicationCheckpoint, replicaShard); + + ReplicationStats replicationStatsFirst = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(1200, replicationStatsFirst.totalBytesBehind); + assertEquals(1200, replicationStatsFirst.maxBytesBehind); + assertTrue(replicationStatsFirst.maxReplicationLag > 0); + + segmentReplicator.updateReplicationCheckpointStats(secondReplicationCheckpoint, replicaShard); + ReplicationStats replicationStatsSecond = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(1200, replicationStatsSecond.totalBytesBehind); + assertEquals(1200, replicationStatsSecond.maxBytesBehind); + assertTrue(replicationStatsSecond.maxReplicationLag > 0); 
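+        // The out-of-order (older) checkpoint is ignored: updateReplicationCheckpointStats only records a
+        // checkpoint that isAheadOf the last one received from the primary, so bytes behind stays at 1200.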
+ } + + public void testUpdateReplicationCheckpointStatsIgnoresWhenOutOfOrderCheckPointReceived() { + ShardId shardId = new ShardId("index", "uuid", 0); + IndexShard replicaShard = mock(IndexShard.class); + when(replicaShard.shardId()).thenReturn(shardId); + + SegmentReplicator segmentReplicator = new SegmentReplicator(threadPool); + ReplicationCheckpoint replicationCheckpoint = new ReplicationCheckpoint( + shardId, + 2, + 2, + 2, + 1000, + "", + new HashMap<>(), + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + segmentReplicator.updateReplicationCheckpointStats(replicationCheckpoint, replicaShard); + + assertEquals(replicationCheckpoint, segmentReplicator.getPrimaryCheckpoint(shardId)); + + ReplicationCheckpoint oldReplicationCheckpoint = new ReplicationCheckpoint( + shardId, + 1, + 1, + 1, + 500, + "", + new HashMap<>(), + System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + ); + segmentReplicator.updateReplicationCheckpointStats(oldReplicationCheckpoint, replicaShard); + + assertEquals(replicationCheckpoint, segmentReplicator.getPrimaryCheckpoint(shardId)); + } + protected void resolveCheckpointListener(ActionListener listener, IndexShard primary) { try (final CopyState copyState = new CopyState(primary)) { listener.onResponse( @@ -209,5 +382,4 @@ protected void resolveCheckpointListener(ActionListener throw new UncheckedIOException(e); } } - } diff --git a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java index 0b30486038e3a..3b7c5560f89fb 100644 --- a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java @@ -81,7 +81,8 @@ public static IndexShard createMockIndexShard() throws IOException { 0L, 0L, Codec.getDefault().getName(), - SI_SNAPSHOT.asMap() + SI_SNAPSHOT.asMap(), + 0L ); final Tuple, ReplicationCheckpoint> gatedCloseableReplicationCheckpointTuple = new Tuple<>( new GatedCloseable<>(testSegmentInfos, () -> {}), diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 655a9eb7d5d38..bdd4b40e398d5 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -86,6 +86,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.MapperTestUtils; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.VersionType; import org.opensearch.index.cache.IndexCache; import org.opensearch.index.cache.query.DisabledQueryCache; @@ -688,6 +689,9 @@ protected IndexShard newShard( } return new InternalTranslogFactory(); }; + // This is fine since we are not testing the node stats now + Function mockReplicationStatsProvider = mock(Function.class); + when(mockReplicationStatsProvider.apply(any())).thenReturn(new ReplicationStats(800, 800, 500)); indexShard = new IndexShard( routing, indexSettings, @@ -717,7 +721,8 @@ protected IndexShard newShard( DefaultRecoverySettings.INSTANCE, DefaultRemoteStoreSettings.INSTANCE, false, - discoveryNodes + discoveryNodes, + mockReplicationStatsProvider ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { From 
415abb91dbb39245690faf349b2cf71e6f65dca0 Mon Sep 17 00:00:00 2001 From: Varun Bharadwaj Date: Thu, 27 Feb 2025 14:45:19 -0800 Subject: [PATCH 035/550] [Pull-based Ingestion] Support segment replication for pull-based ingestion (#17359) --- .../plugin/kafka/IngestFromKafkaIT.java | 212 ++--- .../plugin/kafka/KafkaIngestionBaseIT.java | 111 +++ .../plugin/kafka/RemoteStoreKafkaIT.java | 125 +++ ...ava => TestContainerThreadLeakFilter.java} | 7 +- .../index/engine/IngestionEngine.java | 843 ++---------------- .../index/engine/InternalEngine.java | 74 +- .../translog/InternalTranslogManager.java | 7 +- .../index/translog/NoOpTranslogManager.java | 60 ++ .../opensearch/index/translog/Translog.java | 2 + .../index/translog/TranslogManager.java | 45 +- .../pollingingest/IngestionEngineFactory.java | 5 + .../index/engine/IngestionEngineTests.java | 23 +- .../opensearch/test/InternalTestCluster.java | 5 +- 13 files changed, 548 insertions(+), 971 deletions(-) create mode 100644 plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java create mode 100644 plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java rename plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/{TestContainerWatchdogThreadLeakFilter.java => TestContainerThreadLeakFilter.java} (76%) diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java index d6b099c6b24d8..d51569431506a 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java @@ -8,12 +8,6 @@ package org.opensearch.plugin.kafka; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; - -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.common.serialization.StringSerializer; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -22,40 +16,24 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.index.query.RangeQueryBuilder; -import org.opensearch.plugins.Plugin; import org.opensearch.plugins.PluginInfo; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Assert; -import java.util.Arrays; -import java.util.Collection; import java.util.List; -import java.util.Properties; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; -import org.testcontainers.containers.KafkaContainer; -import org.testcontainers.utility.DockerImageName; - import static org.hamcrest.Matchers.is; import static org.awaitility.Awaitility.await; /** * Integration test for Kafka ingestion */ -@ThreadLeakFilters(filters = TestContainerWatchdogThreadLeakFilter.class) -public class IngestFromKafkaIT extends OpenSearchIntegTestCase { - static final String topicName = "test"; - - private KafkaContainer kafka; - - @Override - protected Collection> nodePlugins() { - return 
Arrays.asList(KafkaPlugin.class); - } - +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class IngestFromKafkaIT extends KafkaIngestionBaseIT { /** * test ingestion-kafka-plugin is installed */ @@ -75,128 +53,86 @@ public void testPluginsAreInstalled() { } public void testKafkaIngestion() { - try { - setupKafka(); - // create an index with ingestion source from kafka - createIndex( - "test", - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("ingestion_source.type", "kafka") - .put("ingestion_source.pointer.init.reset", "earliest") - .put("ingestion_source.param.topic", "test") - .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) - .build(), - "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" - ); + produceData("1", "name1", "24"); + produceData("2", "name2", "20"); + + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "earliest") + .put("ingestion_source.param.topic", "test") + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("index.replication.type", "SEGMENT") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); - RangeQueryBuilder query = new RangeQueryBuilder("age").gte(21); - await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { - refresh("test"); - SearchResponse response = client().prepareSearch("test").setQuery(query).get(); - assertThat(response.getHits().getTotalHits().value(), is(1L)); - }); - } finally { - stopKafka(); - } + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(21); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test"); + SearchResponse response = client().prepareSearch("test").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); } public void testKafkaIngestion_RewindByTimeStamp() { - try { - setupKafka(); - // create an index with ingestion source from kafka - createIndex( - "test_rewind_by_timestamp", - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("ingestion_source.type", "kafka") - .put("ingestion_source.pointer.init.reset", "rewind_by_timestamp") - // 1739459500000 is the timestamp of the first message - // 1739459800000 is the timestamp of the second message - // by resetting to 1739459600000, only the second message will be ingested - .put("ingestion_source.pointer.init.reset.value", "1739459600000") - .put("ingestion_source.param.topic", "test") - .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) - .put("ingestion_source.param.auto.offset.reset", "latest") - .build(), - "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" - ); + produceData("1", "name1", "24", 1739459500000L); + produceData("2", "name2", "20", 1739459800000L); + + // create an index with ingestion source from kafka + createIndex( + "test_rewind_by_timestamp", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "rewind_by_timestamp") + // 1739459500000 is 
the timestamp of the first message + // 1739459800000 is the timestamp of the second message + // by resetting to 1739459600000, only the second message will be ingested + .put("ingestion_source.pointer.init.reset.value", "1739459600000") + .put("ingestion_source.param.topic", "test") + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("ingestion_source.param.auto.offset.reset", "latest") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); - RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); - await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { - refresh("test_rewind_by_timestamp"); - SearchResponse response = client().prepareSearch("test_rewind_by_timestamp").setQuery(query).get(); - assertThat(response.getHits().getTotalHits().value(), is(1L)); - }); - } finally { - stopKafka(); - } + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test_rewind_by_timestamp"); + SearchResponse response = client().prepareSearch("test_rewind_by_timestamp").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); } public void testKafkaIngestion_RewindByOffset() { - try { - setupKafka(); - // create an index with ingestion source from kafka - createIndex( - "test_rewind_by_offset", - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("ingestion_source.type", "kafka") - .put("ingestion_source.pointer.init.reset", "rewind_by_offset") - .put("ingestion_source.pointer.init.reset.value", "1") - .put("ingestion_source.param.topic", "test") - .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) - .put("ingestion_source.param.auto.offset.reset", "latest") - .build(), - "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" - ); - - RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); - await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { - refresh("test_rewind_by_offset"); - SearchResponse response = client().prepareSearch("test_rewind_by_offset").setQuery(query).get(); - assertThat(response.getHits().getTotalHits().value(), is(1L)); - }); - } finally { - stopKafka(); - } - } - - private void setupKafka() { - kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.2.1")) - // disable topic auto creation - .withEnv("KAFKA_AUTO_CREATE_TOPICS_ENABLE", "false"); - kafka.start(); - prepareKafkaData(); - } - - private void stopKafka() { - if (kafka != null) { - kafka.stop(); - } - } - - private void prepareKafkaData() { - String boostrapServers = kafka.getBootstrapServers(); - KafkaUtils.createTopic(topicName, 1, boostrapServers); - Properties props = new Properties(); - props.put("bootstrap.servers", kafka.getBootstrapServers()); - Producer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); - producer.send( - new ProducerRecord<>(topicName, null, 1739459500000L, "null", "{\"_id\":\"1\",\"_source\":{\"name\":\"bob\", \"age\": 24}}") - ); - producer.send( - new ProducerRecord<>( - topicName, - null, - 1739459800000L, - "null", - "{\"_id\":\"2\", \"_op_type:\":\"index\",\"_source\":{\"name\":\"alice\", \"age\": 20}}" - ) + produceData("1", "name1", "24"); + produceData("2", "name2", "20"); + // create an index with ingestion source from kafka + createIndex( + "test_rewind_by_offset", + 
Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "rewind_by_offset") + .put("ingestion_source.pointer.init.reset.value", "1") + .put("ingestion_source.param.topic", "test") + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("ingestion_source.param.auto.offset.reset", "latest") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" ); - producer.close(); + + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test_rewind_by_offset"); + SearchResponse response = client().prepareSearch("test_rewind_by_offset").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); } } diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java new file mode 100644 index 0000000000000..087bc9786872f --- /dev/null +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kafka; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.serialization.StringSerializer; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Properties; +import java.util.concurrent.TimeUnit; + +import org.testcontainers.containers.KafkaContainer; +import org.testcontainers.utility.DockerImageName; + +/** + * Base test class for Kafka ingestion tests + */ +@ThreadLeakFilters(filters = TestContainerThreadLeakFilter.class) +public class KafkaIngestionBaseIT extends OpenSearchIntegTestCase { + static final String topicName = "test"; + static final String indexName = "testindex"; + static final String mapping = "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}"; + static final long defaultMessageTimestamp = 1739459500000L; + + protected KafkaContainer kafka; + protected Producer producer; + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(KafkaPlugin.class); + } + + @Before + private void setup() { + setupKafka(); + } + + @After + private void cleanup() { + stopKafka(); + } + + private void setupKafka() { + kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.2.1")) + // disable topic auto creation + .withEnv("KAFKA_AUTO_CREATE_TOPICS_ENABLE", "false"); + kafka.start(); + + // setup producer + String boostrapServers = kafka.getBootstrapServers(); + KafkaUtils.createTopic(topicName, 1, boostrapServers); + Properties props = new Properties(); + 
props.put("bootstrap.servers", kafka.getBootstrapServers()); + producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); + } + + private void stopKafka() { + if (producer != null) { + producer.close(); + } + + if (kafka != null) { + kafka.stop(); + } + } + + protected void produceData(String id, String name, String age) { + produceData(id, name, age, defaultMessageTimestamp); + } + + protected void produceData(String id, String name, String age, long timestamp) { + String payload = String.format( + Locale.ROOT, + "{\"_id\":\"%s\", \"_op_type:\":\"index\",\"_source\":{\"name\":\"%s\", \"age\": %s}}", + id, + name, + age + ); + producer.send(new ProducerRecord<>(topicName, null, timestamp, "null", payload)); + } + + protected void waitForSearchableDocs(long docCount, List nodes) throws Exception { + assertBusy(() -> { + for (String node : nodes) { + final SearchResponse response = client(node).prepareSearch(indexName).setSize(0).setPreference("_only_local").get(); + final long hits = response.getHits().getTotalHits().value(); + if (hits < docCount) { + fail("Expected search hits on node: " + node + " to be at least " + docCount + " but was: " + hits); + } + } + }, 1, TimeUnit.MINUTES); + } +} diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java new file mode 100644 index 0000000000000..a9f818a9ca825 --- /dev/null +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java @@ -0,0 +1,125 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kafka; + +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.Arrays; + +import static org.hamcrest.Matchers.is; + +/** + * Integration tests for segment replication with remote store using kafka as ingestion source. + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreKafkaIT extends KafkaIngestionBaseIT { + private static final String REPOSITORY_NAME = "test-remote-store-repo"; + private Path absolutePath; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) + .build(); + } + + public void testSegmentReplicationWithRemoteStore() throws Exception { + // Step 1: Create primary and replica nodes. Create index with 1 replica and kafka as ingestion source. 
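+        // (The index uses index.replication.type=SEGMENT with one replica; remote store is configured
+        // through the repository settings supplied in nodeSettings above.)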
+ + internalCluster().startClusterManagerOnlyNode(); + final String nodeA = internalCluster().startDataOnlyNode(); + + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "earliest") + .put("ingestion_source.param.topic", topicName) + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("index.replication.type", "SEGMENT") + .build(), + mapping + ); + + ensureYellowAndNoInitializingShards(indexName); + final String nodeB = internalCluster().startDataOnlyNode(); + ensureGreen(indexName); + assertTrue(nodeA.equals(primaryNodeName(indexName))); + assertTrue(nodeB.equals(replicaNodeName(indexName))); + verifyRemoteStoreEnabled(nodeA); + verifyRemoteStoreEnabled(nodeB); + + // Step 2: Produce update messages and validate segment replication + + produceData("1", "name1", "24"); + produceData("2", "name2", "20"); + refresh(indexName); + waitForSearchableDocs(2, Arrays.asList(nodeA, nodeB)); + + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(21); + SearchResponse primaryResponse = client(nodeA).prepareSearch(indexName).setQuery(query).setPreference("_only_local").get(); + assertThat(primaryResponse.getHits().getTotalHits().value(), is(1L)); + SearchResponse replicaResponse = client(nodeB).prepareSearch(indexName).setQuery(query).setPreference("_only_local").get(); + assertThat(replicaResponse.getHits().getTotalHits().value(), is(1L)); + + // Step 3: Stop current primary node and validate replica promotion. + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeA)); + ensureYellowAndNoInitializingShards(indexName); + assertTrue(nodeB.equals(primaryNodeName(indexName))); + + // Step 4: Verify new primary node is able to index documents + + produceData("3", "name3", "30"); + produceData("4", "name4", "31"); + refresh(indexName); + waitForSearchableDocs(4, Arrays.asList(nodeB)); + + SearchResponse newPrimaryResponse = client(nodeB).prepareSearch(indexName).setQuery(query).setPreference("_only_local").get(); + assertThat(newPrimaryResponse.getHits().getTotalHits().value(), is(3L)); + + // Step 5: Add a new node and assign the replica shard. Verify node recovery works. + + final String nodeC = internalCluster().startDataOnlyNode(); + client().admin().cluster().prepareReroute().add(new AllocateReplicaAllocationCommand(indexName, 0, nodeC)).get(); + ensureGreen(indexName); + assertTrue(nodeC.equals(replicaNodeName(indexName))); + verifyRemoteStoreEnabled(nodeC); + + waitForSearchableDocs(4, Arrays.asList(nodeC)); + SearchResponse newReplicaResponse = client(nodeC).prepareSearch(indexName).setQuery(query).setPreference("_only_local").get(); + assertThat(newReplicaResponse.getHits().getTotalHits().value(), is(3L)); + + // Step 6: Produce new updates and verify segment replication works when primary and replica index are not empty. 
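+        // At this point nodeB is the promoted primary and nodeC the newly recovered replica, both already
+        // holding documents 1-4, so this verifies replication into non-empty shard copies.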
+ produceData("5", "name5", "40"); + produceData("6", "name6", "41"); + refresh(indexName); + waitForSearchableDocs(6, Arrays.asList(nodeB, nodeC)); + } + + private void verifyRemoteStoreEnabled(String node) { + GetSettingsResponse settingsResponse = client(node).admin().indices().prepareGetSettings(indexName).get(); + String remoteStoreEnabled = settingsResponse.getIndexToSettings().get(indexName).get("index.remote_store.enabled"); + assertEquals("Remote store should be enabled", "true", remoteStoreEnabled); + } +} diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerWatchdogThreadLeakFilter.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerThreadLeakFilter.java similarity index 76% rename from plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerWatchdogThreadLeakFilter.java rename to plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerThreadLeakFilter.java index 50b88c6233a46..91e2c83ebfa48 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerWatchdogThreadLeakFilter.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerThreadLeakFilter.java @@ -13,11 +13,12 @@ /** * The {@link org.testcontainers.images.TimeLimitedLoggedPullImageResultCallback} instance used by test containers, * for example {@link org.testcontainers.containers.KafkaContainer} creates a watcher daemon thread which is never - * stopped. This filter excludes that thread from the thread leak detection logic. + * stopped. This filter excludes that thread from the thread leak detection logic. It also excludes ryuk resource reaper + * thread which is not closed on time. 
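+ * (Matches thread names starting with "testcontainers-pull-watchdog-" or "testcontainers-ryuk".)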
*/ -public final class TestContainerWatchdogThreadLeakFilter implements ThreadFilter { +public final class TestContainerThreadLeakFilter implements ThreadFilter { @Override public boolean reject(Thread t) { - return t.getName().startsWith("testcontainers-pull-watchdog-"); + return t.getName().startsWith("testcontainers-pull-watchdog-") || t.getName().startsWith("testcontainers-ryuk"); } } diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index b37281b9d1582..72b59ba88b4c2 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -8,145 +8,54 @@ package org.opensearch.index.engine; -import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.MergePolicy; -import org.apache.lucene.index.SegmentCommitInfo; -import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockObtainFailedException; -import org.apache.lucene.util.InfoStream; import org.opensearch.ExceptionsHelper; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IngestionSource; -import org.opensearch.common.Booleans; -import org.opensearch.common.Nullable; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.concurrent.GatedCloseable; -import org.opensearch.common.lucene.LoggerInfoStream; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ReleasableLock; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.core.common.unit.ByteSizeValue; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.IndexSettings; import org.opensearch.index.IngestionConsumerFactory; import org.opensearch.index.IngestionShardConsumer; import org.opensearch.index.IngestionShardPointer; import org.opensearch.index.mapper.DocumentMapperForType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.ParseContext; -import org.opensearch.index.merge.MergeStats; -import org.opensearch.index.merge.OnGoingMerge; -import org.opensearch.index.seqno.SeqNoStats; -import org.opensearch.index.shard.OpenSearchMergePolicy; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.NoOpTranslogManager; import org.opensearch.index.translog.Translog; -import org.opensearch.index.translog.TranslogCorruptedException; +import org.opensearch.index.translog.TranslogDeletionPolicy; import org.opensearch.index.translog.TranslogManager; import org.opensearch.index.translog.TranslogStats; +import org.opensearch.index.translog.listener.CompositeTranslogEventListener; import org.opensearch.indices.pollingingest.DefaultStreamPoller; import org.opensearch.indices.pollingingest.StreamPoller; -import org.opensearch.search.suggest.completion.CompletionStats; -import 
org.opensearch.threadpool.ThreadPool; -import java.io.Closeable; import java.io.IOException; -import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.BiConsumer; import java.util.function.BiFunction; -import java.util.function.UnaryOperator; import static org.opensearch.index.translog.Translog.EMPTY_TRANSLOG_SNAPSHOT; /** * IngestionEngine is an engine that ingests data from a stream source. */ -public class IngestionEngine extends Engine { - - private volatile SegmentInfos lastCommittedSegmentInfos; - private final CompletionStatsCache completionStatsCache; - private final IndexWriter indexWriter; - private final OpenSearchReaderManager internalReaderManager; - private final ExternalReaderManager externalReaderManager; - private final Lock flushLock = new ReentrantLock(); - private final ReentrantLock optimizeLock = new ReentrantLock(); - private final OpenSearchConcurrentMergeScheduler mergeScheduler; - private final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); - private final TranslogManager translogManager; - private final DocumentMapperForType documentMapperForType; - private final IngestionConsumerFactory ingestionConsumerFactory; - private StreamPoller streamPoller; +public class IngestionEngine extends InternalEngine { - /** - * UUID value that is updated every time the engine is force merged. - */ - @Nullable - private volatile String forceMergeUUID; + private StreamPoller streamPoller; + private final IngestionConsumerFactory ingestionConsumerFactory; + private final DocumentMapperForType documentMapperForType; public IngestionEngine(EngineConfig engineConfig, IngestionConsumerFactory ingestionConsumerFactory) { super(engineConfig); - store.incRef(); - boolean success = false; - try { - this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); - this.completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats")); - IndexMetadata indexMetadata = engineConfig.getIndexSettings().getIndexMetadata(); - assert indexMetadata != null; - mergeScheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings()); - indexWriter = createWriter(); - externalReaderManager = createReaderManager(new InternalEngine.RefreshWarmerListener(logger, isClosed, engineConfig)); - internalReaderManager = externalReaderManager.internalReaderManager; - translogManager = new NoOpTranslogManager( - shardId, - readLock, - this::ensureOpen, - new TranslogStats(0, 0, 0, 0, 0), - EMPTY_TRANSLOG_SNAPSHOT - ); - documentMapperForType = engineConfig.getDocumentMapperForTypeSupplier().get(); - this.ingestionConsumerFactory = Objects.requireNonNull(ingestionConsumerFactory); - - success = true; - } catch (IOException | TranslogCorruptedException e) { - throw new EngineCreationFailureException(shardId, "failed to create engine", e); - } finally { - if (!success) { - if (streamPoller != null) { - try { - streamPoller.close(); - } catch (IOException e) { - logger.error("failed to close stream poller", e); - throw new RuntimeException(e); - } - } - if (!isClosed.get()) { - // failure, we need to dec the store reference - 
store.decRef(); - } - } - } + this.ingestionConsumerFactory = Objects.requireNonNull(ingestionConsumerFactory); + this.documentMapperForType = engineConfig.getDocumentMapperForTypeSupplier().get(); + } /** @@ -169,11 +78,11 @@ public void start() { engineConfig.getShardId().getId() ); logger.info("created ingestion consumer for shard [{}]", engineConfig.getShardId()); - - Map commitData = commitDataAsMap(); + Map commitData = commitDataAsMap(indexWriter); StreamPoller.ResetState resetState = ingestionSource.getPointerInitReset().getType(); IngestionShardPointer startPointer = null; Set persistedPointers = new HashSet<>(); + if (commitData.containsKey(StreamPoller.BATCH_START)) { // try recovering from commit data String batchStartStr = commitData.get(StreamPoller.BATCH_START); @@ -190,23 +99,13 @@ public void start() { String resetValue = ingestionSource.getPointerInitReset().getValue(); streamPoller = new DefaultStreamPoller(startPointer, persistedPointers, ingestionShardConsumer, this, resetState, resetValue); - streamPoller.start(); - } - private IndexWriter createWriter() throws IOException { - try { - final IndexWriterConfig iwc = getIndexWriterConfig(); - return createWriter(store.directory(), iwc); - } catch (LockObtainFailedException ex) { - logger.warn("could not lock IndexWriter", ex); - throw ex; + // Poller is only started on the primary shard. Replica shards will rely on segment replication. + if (!engineConfig.isReadOnlyReplica()) { + streamPoller.start(); } } - public DocumentMapperForType getDocumentMapperForType() { - return documentMapperForType; - } - protected Set fetchPersistedOffsets(DirectoryReader directoryReader, IngestionShardPointer batchStart) throws IOException { final IndexSearcher searcher = new IndexSearcher(directoryReader); @@ -228,195 +127,6 @@ protected Set fetchPersistedOffsets(DirectoryReader direc return result; } - /** - * a copy of ExternalReaderManager from InternalEngine - */ - @SuppressForbidden(reason = "reference counting is required here") - static final class ExternalReaderManager extends ReferenceManager { - private final BiConsumer refreshListener; - private final OpenSearchReaderManager internalReaderManager; - private boolean isWarmedUp; // guarded by refreshLock - - ExternalReaderManager( - OpenSearchReaderManager internalReaderManager, - BiConsumer refreshListener - ) throws IOException { - this.refreshListener = refreshListener; - this.internalReaderManager = internalReaderManager; - this.current = internalReaderManager.acquire(); // steal the reference without warming up - } - - @Override - protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader referenceToRefresh) throws IOException { - // we simply run a blocking refresh on the internal reference manager and then steal it's reader - // it's a save operation since we acquire the reader which incs it's reference but then down the road - // steal it by calling incRef on the "stolen" reader - internalReaderManager.maybeRefreshBlocking(); - final OpenSearchDirectoryReader newReader = internalReaderManager.acquire(); - if (isWarmedUp == false || newReader != referenceToRefresh) { - boolean success = false; - try { - refreshListener.accept(newReader, isWarmedUp ? 
referenceToRefresh : null); - isWarmedUp = true; - success = true; - } finally { - if (success == false) { - internalReaderManager.release(newReader); - } - } - } - // nothing has changed - both ref managers share the same instance so we can use reference equality - if (referenceToRefresh == newReader) { - internalReaderManager.release(newReader); - return null; - } else { - return newReader; // steal the reference - } - } - - @Override - protected boolean tryIncRef(OpenSearchDirectoryReader reference) { - return reference.tryIncRef(); - } - - @Override - protected int getRefCount(OpenSearchDirectoryReader reference) { - return reference.getRefCount(); - } - - @Override - protected void decRef(OpenSearchDirectoryReader reference) throws IOException { - reference.decRef(); - } - } - - private ExternalReaderManager createReaderManager(InternalEngine.RefreshWarmerListener externalRefreshListener) throws EngineException { - boolean success = false; - OpenSearchReaderManager internalReaderManager = null; - try { - try { - final OpenSearchDirectoryReader directoryReader = OpenSearchDirectoryReader.wrap( - DirectoryReader.open(indexWriter), - shardId - ); - internalReaderManager = new OpenSearchReaderManager(directoryReader); - lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); - ExternalReaderManager externalReaderManager = new ExternalReaderManager(internalReaderManager, externalRefreshListener); - success = true; - return externalReaderManager; - } catch (IOException e) { - maybeFailEngine("start", e); - try { - indexWriter.rollback(); - } catch (IOException inner) { // iw is closed below - e.addSuppressed(inner); - } - throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e); - } - } finally { - if (success == false) { // release everything we created on a failure - IOUtils.closeWhileHandlingException(internalReaderManager, indexWriter); - } - } - } - - // pkg-private for testing - IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { - return new IndexWriter(directory, iwc); - } - - private IndexWriterConfig getIndexWriterConfig() { - final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); - iwc.setCommitOnClose(false); // we by default don't commit on close - iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); - // with tests.verbose, lucene sets this up: plumb to align with filesystem stream - boolean verbose = false; - try { - verbose = Boolean.parseBoolean(System.getProperty("tests.verbose")); - } catch (Exception ignore) {} - iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger)); - iwc.setMergeScheduler(mergeScheduler); - // set merge scheduler - MergePolicy mergePolicy = config().getMergePolicy(); - boolean shuffleForcedMerge = Booleans.parseBoolean(System.getProperty("opensearch.shuffle_forced_merge", Boolean.TRUE.toString())); - if (shuffleForcedMerge) { - // We wrap the merge policy for all indices even though it is mostly useful for time-based indices - // but there should be no overhead for other type of indices so it's simpler than adding a setting - // to enable it. 
- mergePolicy = new ShuffleForcedMergePolicy(mergePolicy); - } - - if (config().getIndexSettings().isMergeOnFlushEnabled()) { - final long maxFullFlushMergeWaitMillis = config().getIndexSettings().getMaxFullFlushMergeWaitTime().millis(); - if (maxFullFlushMergeWaitMillis > 0) { - iwc.setMaxFullFlushMergeWaitMillis(maxFullFlushMergeWaitMillis); - final Optional> mergeOnFlushPolicy = config().getIndexSettings().getMergeOnFlushPolicy(); - if (mergeOnFlushPolicy.isPresent()) { - mergePolicy = mergeOnFlushPolicy.get().apply(mergePolicy); - } - } - } else { - // Disable merge on refresh - iwc.setMaxFullFlushMergeWaitMillis(0); - } - - iwc.setCheckPendingFlushUpdate(config().getIndexSettings().isCheckPendingFlushEnabled()); - iwc.setMergePolicy(new OpenSearchMergePolicy(mergePolicy)); - iwc.setSimilarity(engineConfig.getSimilarity()); - iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); - iwc.setCodec(engineConfig.getCodec()); - iwc.setUseCompoundFile(engineConfig.useCompoundFile()); - if (config().getIndexSort() != null) { - iwc.setIndexSort(config().getIndexSort()); - } - if (config().getLeafSorter() != null) { - iwc.setLeafSorter(config().getLeafSorter()); // The default segment search order - } - - return new IndexWriterConfig(new StandardAnalyzer()); - } - - @Override - public TranslogManager translogManager() { - // ingestion engine does not have translog - return translogManager; - } - - @Override - protected SegmentInfos getLastCommittedSegmentInfos() { - return lastCommittedSegmentInfos; - } - - @Override - protected SegmentInfos getLatestSegmentInfos() { - throw new UnsupportedOperationException(); - } - - @Override - public String getHistoryUUID() { - return loadHistoryUUID(lastCommittedSegmentInfos.userData); - } - - @Override - public long getWritingBytes() { - return 0; - } - - @Override - public CompletionStats completionStats(String... 
fieldNamePatterns) { - return completionStatsCache.get(fieldNamePatterns); - } - - @Override - public long getIndexThrottleTimeInMillis() { - return 0; - } - - @Override - public boolean isThrottled() { - return false; - } - @Override public IndexResult index(Index index) throws IOException { assert Objects.equals(index.uid().field(), IdFieldMapper.NAME) : index.uid().field(); @@ -457,16 +167,6 @@ public GetResult get(Get get, BiFunction search return getFromSearcher(get, searcherFactory, SearcherScope.EXTERNAL); } - @Override - protected ReferenceManager getReferenceManager(SearcherScope scope) { - return externalReaderManager; - } - - @Override - public Closeable acquireHistoryRetentionLock() { - throw new UnsupportedOperationException("Not implemented"); - } - @Override public Translog.Snapshot newChangesSnapshot( String source, @@ -475,199 +175,36 @@ public Translog.Snapshot newChangesSnapshot( boolean requiredFullRange, boolean accurateCount ) throws IOException { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public int countNumberOfHistoryOperations(String source, long fromSeqNo, long toSeqNumber) throws IOException { - return 0; - } - - @Override - public boolean hasCompleteOperationHistory(String reason, long startingSeqNo) { - return false; - } - - @Override - public long getMinRetainedSeqNo() { - return 0; - } - - @Override - public long getPersistedLocalCheckpoint() { - return 0; - } - - @Override - public long getProcessedLocalCheckpoint() { - return 0; - } - - @Override - public SeqNoStats getSeqNoStats(long globalCheckpoint) { - return null; - } - - @Override - public long getLastSyncedGlobalCheckpoint() { - return 0; - } - - @Override - public long getIndexBufferRAMBytesUsed() { - return 0; - } - - @Override - public List segments(boolean verbose) { - try (ReleasableLock lock = readLock.acquire()) { - Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose); - - // fill in the merges flag - Set onGoingMerges = mergeScheduler.onGoingMerges(); - for (OnGoingMerge onGoingMerge : onGoingMerges) { - for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) { - for (Segment segment : segmentsArr) { - if (segment.getName().equals(segmentInfoPerCommit.info.name)) { - segment.mergeId = onGoingMerge.getId(); - break; - } - } - } - } - return Arrays.asList(segmentsArr); - } - } - - @Override - public void refresh(String source) throws EngineException { - refresh(source, SearcherScope.EXTERNAL, true); - } - - final boolean refresh(String source, SearcherScope scope, boolean block) throws EngineException { - boolean refreshed; - try { - // refresh does not need to hold readLock as ReferenceManager can handle correctly if the engine is closed in mid-way. - if (store.tryIncRef()) { - // increment the ref just to ensure nobody closes the store during a refresh - try { - // even though we maintain 2 managers we really do the heavy-lifting only once. - // the second refresh will only do the extra work we have to do for warming caches etc. 
- ReferenceManager referenceManager = getReferenceManager(scope); - // it is intentional that we never refresh both internal / external together - if (block) { - referenceManager.maybeRefreshBlocking(); - refreshed = true; - } else { - refreshed = referenceManager.maybeRefresh(); - } - } finally { - store.decRef(); - } - } else { - refreshed = false; - } - } catch (AlreadyClosedException e) { - failOnTragicEvent(e); - throw e; - } catch (Exception e) { - try { - failEngine("refresh failed source[" + source + "]", e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw new RefreshFailedEngineException(shardId, e); - } - // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes - // for a long time: - maybePruneDeletes(); - // TODO: use OS merge scheduler - mergeScheduler.refreshConfig(); - return refreshed; - } - - @Override - public boolean maybeRefresh(String source) throws EngineException { - return refresh(source, SearcherScope.EXTERNAL, false); - } - - @Override - public void writeIndexingBuffer() throws EngineException { - refresh("write indexing buffer", SearcherScope.INTERNAL, false); - } - - @Override - public boolean shouldPeriodicallyFlush() { - return false; - } - - @Override - public void flush(boolean force, boolean waitIfOngoing) throws EngineException { - ensureOpen(); - if (force && waitIfOngoing == false) { - assert false : "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing; - throw new IllegalArgumentException( - "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing - ); - } - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); - if (flushLock.tryLock() == false) { - // if we can't get the lock right away we block if needed otherwise barf - if (waitIfOngoing == false) { - return; - } - logger.trace("waiting for in-flight flush to finish"); - flushLock.lock(); - logger.trace("acquired flush lock after blocking"); - } else { - logger.trace("acquired flush lock immediately"); - } - try { - // Only flush if (1) Lucene has uncommitted docs, or (2) forced by caller, - // - // do we need to consider #3 and #4 as in InternalEngine? - // (3) the newly created commit points to a different translog generation (can free translog), - // or (4) the local checkpoint information in the last commit is stale, which slows down future recoveries. - boolean hasUncommittedChanges = indexWriter.hasUncommittedChanges(); - if (hasUncommittedChanges || force) { - logger.trace("starting commit for flush;"); - - // TODO: do we need to close the latest commit as done in InternalEngine? - commitIndexWriter(indexWriter); - - logger.trace("finished commit for flush"); - - // a temporary debugging to investigate test failure - issue#32827. Remove when the issue is resolved - logger.debug("new commit on flush, hasUncommittedChanges:{}, force:{}", hasUncommittedChanges, force); - - // we need to refresh in order to clear older version values - refresh("version_table_flush", SearcherScope.INTERNAL, true); - } - } catch (FlushFailedEngineException ex) { - maybeFailEngine("flush", ex); - throw ex; - } catch (IOException e) { - throw new FlushFailedEngineException(shardId, e); - } finally { - flushLock.unlock(); - } - } + return EMPTY_TRANSLOG_SNAPSHOT; } /** - * Commits the specified index writer. 
- * - * @param writer the index writer to commit + * This method is a copy of commitIndexWriter method from {@link InternalEngine} with some additions for ingestion + * source. */ - protected void commitIndexWriter(final IndexWriter writer) throws IOException { + @Override + protected void commitIndexWriter(final IndexWriter writer, final String translogUUID) throws IOException { try { + final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); writer.setLiveCommitData(() -> { /* - * The user data captured the min and max range of the stream poller + * The user data captured above (e.g. local checkpoint) contains data that must be evaluated *before* Lucene flushes + * segments, including the local checkpoint amongst other values. The maximum sequence number is different, we never want + * the maximum sequence number to be less than the last sequence number to go into a Lucene commit, otherwise we run the + * risk of re-using a sequence number for two different documents when restoring from this commit point and subsequently + * writing new documents to the index. Since we only know which Lucene documents made it into the final commit after the + * {@link IndexWriter#commit()} call flushes all documents, we defer computation of the maximum sequence number to the time + * of invocation of the commit data iterator (which occurs after all documents have been flushed to Lucene). */ - final Map commitData = new HashMap<>(2); - + final Map commitData = new HashMap<>(7); + commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID); + commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(localCheckpoint)); + commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(localCheckpointTracker.getMaxSeqNo())); + commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); + commitData.put(HISTORY_UUID_KEY, historyUUID); + commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(softDeletesPolicy.getMinRetainedSeqNo())); + + // ingestion engine needs to record batch start pointer commitData.put(StreamPoller.BATCH_START, streamPoller.getBatchStartPointer().asString()); final String currentForceMergeUUID = forceMergeUUID; if (currentForceMergeUUID != null) { @@ -676,6 +213,7 @@ protected void commitIndexWriter(final IndexWriter writer) throws IOException { logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); }); + shouldPeriodicallyFlushAfterBigMerge.set(false); writer.commit(); } catch (final Exception ex) { try { @@ -703,268 +241,6 @@ protected void commitIndexWriter(final IndexWriter writer) throws IOException { } } - @Override - public MergeStats getMergeStats() { - return mergeScheduler.stats(); - } - - @Override - public void onSettingsChanged(TimeValue translogRetentionAge, ByteSizeValue translogRetentionSize, long softDeletesRetentionOps) { - mergeScheduler.refreshConfig(); - // TODO: do we need more? - } - - protected Map commitDataAsMap() { - return commitDataAsMap(indexWriter); - } - - /** - * Gets the commit data from {@link IndexWriter} as a map. 
- */ - protected static Map commitDataAsMap(final IndexWriter indexWriter) { - final Map commitData = new HashMap<>(8); - for (Map.Entry entry : indexWriter.getLiveCommitData()) { - commitData.put(entry.getKey(), entry.getValue()); - } - return commitData; - } - - @Override - public void forceMerge( - boolean flush, - int maxNumSegments, - boolean onlyExpungeDeletes, - boolean upgrade, - boolean upgradeOnlyAncientSegments, - String forceMergeUUID - ) throws EngineException, IOException { - /* - * We do NOT acquire the readlock here since we are waiting on the merges to finish - * that's fine since the IW.rollback should stop all the threads and trigger an IOException - * causing us to fail the forceMerge - * - * The way we implement upgrades is a bit hackish in the sense that we set an instance - * variable and that this setting will thus apply to the next forced merge that will be run. - * This is ok because (1) this is the only place we call forceMerge, (2) we have a single - * thread for optimize, and the 'optimizeLock' guarding this code, and (3) ConcurrentMergeScheduler - * syncs calls to findForcedMerges. - */ - assert indexWriter.getConfig().getMergePolicy() instanceof OpenSearchMergePolicy : "MergePolicy is " - + indexWriter.getConfig().getMergePolicy().getClass().getName(); - OpenSearchMergePolicy mp = (OpenSearchMergePolicy) indexWriter.getConfig().getMergePolicy(); - optimizeLock.lock(); - try { - ensureOpen(); - if (upgrade) { - logger.info("starting segment upgrade upgradeOnlyAncientSegments={}", upgradeOnlyAncientSegments); - mp.setUpgradeInProgress(true, upgradeOnlyAncientSegments); - } - store.incRef(); // increment the ref just to ensure nobody closes the store while we optimize - try { - if (onlyExpungeDeletes) { - assert upgrade == false; - indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/); - } else if (maxNumSegments <= 0) { - assert upgrade == false; - indexWriter.maybeMerge(); - } else { - indexWriter.forceMerge(maxNumSegments, true /* blocks and waits for merges*/); - this.forceMergeUUID = forceMergeUUID; - } - if (flush) { - flush(false, true); - } - if (upgrade) { - logger.info("finished segment upgrade"); - } - } finally { - store.decRef(); - } - } catch (AlreadyClosedException ex) { - /* in this case we first check if the engine is still open. If so this exception is just fine - * and expected. We don't hold any locks while we block on forceMerge otherwise it would block - * closing the engine as well. If we are not closed we pass it on to failOnTragicEvent which ensures - * we are handling a tragic even exception here */ - ensureOpen(ex); - failOnTragicEvent(ex); - throw ex; - } catch (Exception e) { - try { - maybeFailEngine(FORCE_MERGE, e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw e; - } finally { - try { - // reset it just to make sure we reset it in a case of an error - mp.setUpgradeInProgress(false, false); - } finally { - optimizeLock.unlock(); - } - } - } - - @Override - public GatedCloseable acquireLastIndexCommit(boolean flushFirst) throws EngineException { - store.incRef(); - try { - var reader = getReferenceManager(SearcherScope.INTERNAL).acquire(); - return new GatedCloseable<>(reader.getIndexCommit(), () -> { - store.decRef(); - getReferenceManager(SearcherScope.INTERNAL).release(reader); - }); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public GatedCloseable acquireSafeIndexCommit() throws EngineException { - // TODO: do we need this? 
likely not - return acquireLastIndexCommit(false); - } - - @Override - public SafeCommitInfo getSafeCommitInfo() { - // TODO: do we need this? - return SafeCommitInfo.EMPTY; - } - - @Override - protected void closeNoLock(String reason, CountDownLatch closedLatch) { - if (isClosed.compareAndSet(false, true)) { - assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() - : "Either the write lock must be held or the engine must be currently be failing itself"; - try { - try { - IOUtils.close(externalReaderManager, internalReaderManager); - } catch (Exception e) { - logger.warn("Failed to close ReaderManager", e); - } - - // no need to commit in this case!, we snapshot before we close the shard, so translog and all sync'ed - logger.trace("rollback indexWriter"); - try { - indexWriter.rollback(); - } catch (AlreadyClosedException ex) { - failOnTragicEvent(ex); - throw ex; - } - logger.trace("rollback indexWriter done"); - } catch (Exception e) { - logger.warn("failed to rollback writer on close", e); - } finally { - try { - store.decRef(); - logger.debug("engine closed [{}]", reason); - } finally { - closedLatch.countDown(); - } - } - } - } - - private boolean failOnTragicEvent(AlreadyClosedException ex) { - final boolean engineFailed; - // if we are already closed due to some tragic exception - // we need to fail the engine. it might have already been failed before - // but we are double-checking it's failed and closed - if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) { - final Exception tragicException; - if (indexWriter.getTragicException() instanceof Exception) { - tragicException = (Exception) indexWriter.getTragicException(); - } else { - tragicException = new RuntimeException(indexWriter.getTragicException()); - } - failEngine("already closed by tragic event on the index writer", tragicException); - engineFailed = true; - } else if (failedEngine.get() == null && isClosed.get() == false) { // we are closed but the engine is not failed yet? - // this smells like a bug - we only expect ACE if we are in a fatal case ie. either translog or IW is closed by - // a tragic event or has closed itself. 
if that is not the case we are in a buggy state and raise an assertion error - throw new AssertionError("Unexpected AlreadyClosedException", ex); - } else { - engineFailed = false; - } - return engineFailed; - } - - private final class EngineMergeScheduler extends OpenSearchConcurrentMergeScheduler { - private final AtomicInteger numMergesInFlight = new AtomicInteger(0); - private final AtomicBoolean isThrottling = new AtomicBoolean(); - - EngineMergeScheduler(ShardId shardId, IndexSettings indexSettings) { - super(shardId, indexSettings); - } - - @Override - public synchronized void beforeMerge(OnGoingMerge merge) { - int maxNumMerges = mergeScheduler.getMaxMergeCount(); - if (numMergesInFlight.incrementAndGet() > maxNumMerges) { - if (isThrottling.getAndSet(true) == false) { - logger.info("now throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); - activateThrottling(); - } - } - } - - @Override - public synchronized void afterMerge(OnGoingMerge merge) { - int maxNumMerges = mergeScheduler.getMaxMergeCount(); - if (numMergesInFlight.decrementAndGet() < maxNumMerges) { - if (isThrottling.getAndSet(false)) { - logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); - deactivateThrottling(); - } - } - if (indexWriter.hasPendingMerges() == false - && System.nanoTime() - lastWriteNanos >= engineConfig.getFlushMergesAfter().nanos()) { - // NEVER do this on a merge thread since we acquire some locks blocking here and if we concurrently rollback the writer - // we deadlock on engine#close for instance. - engineConfig.getThreadPool().executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - if (isClosed.get() == false) { - logger.warn("failed to flush after merge has finished"); - } - } - - @Override - protected void doRun() { - // if we have no pending merges and we are supposed to flush once merges have finished to - // free up transient disk usage of the (presumably biggish) segments that were just merged - flush(); - } - }); - } else if (merge.getTotalBytesSize() >= engineConfig.getIndexSettings().getFlushAfterMergeThresholdSize().getBytes()) { - // we hit a significant merge which would allow us to free up memory if we'd commit it hence on the next change - // we should execute a flush on the next operation if that's a flush after inactive or indexing a document. - // we could fork a thread and do it right away but we try to minimize forking and piggyback on outside events. - shouldPeriodicallyFlushAfterBigMerge.set(true); - } - } - - @Override - protected void handleMergeException(final Throwable exc) { - engineConfig.getThreadPool().generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - logger.debug("merge failure action rejected", e); - } - - @Override - protected void doRun() throws Exception { - /* - * We do this on another thread rather than the merge thread that we are initially called on so that we have complete - * confidence that the call stack does not contain catch statements that would cause the error that might be thrown - * here from being caught and never reaching the uncaught exception handler. - */ - failEngine(MERGE_FAILED, new MergePolicy.MergeException(exc)); - } - }); - } - } - @Override public void activateThrottling() { // TODO: add this when we have a thread pool for indexing in parallel @@ -975,38 +251,41 @@ public void deactivateThrottling() { // TODO: is this needed? 
} - @Override - public int fillSeqNoGaps(long primaryTerm) throws IOException { - // TODO: is this needed? - return 0; - } - @Override public void maybePruneDeletes() { // no need to prune deletes in ingestion engine } @Override - public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) { - // TODO: is this needed? + public void close() throws IOException { + if (streamPoller != null) { + streamPoller.close(); + } + super.close(); } - @Override - public long getMaxSeqNoOfUpdatesOrDeletes() { - // TODO: is this needed? - return 0; + public DocumentMapperForType getDocumentMapperForType() { + return documentMapperForType; } @Override - public void advanceMaxSeqNoOfUpdatesOrDeletes(long maxSeqNoOfUpdatesOnPrimary) { - // TODO: is this needed? + protected TranslogManager createTranslogManager( + String translogUUID, + TranslogDeletionPolicy translogDeletionPolicy, + CompositeTranslogEventListener translogEventListener + ) throws IOException { + return new NoOpTranslogManager( + shardId, + readLock, + this::ensureOpen, + new TranslogStats(), + EMPTY_TRANSLOG_SNAPSHOT, + translogUUID, + true + ); } - @Override - public void close() throws IOException { - if (streamPoller != null) { - streamPoller.close(); - } - super.close(); + protected Map commitDataAsMap() { + return commitDataAsMap(indexWriter); } } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index ff790fa1513f1..064e757c6ebb7 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -144,16 +144,28 @@ */ public class InternalEngine extends Engine { + /** + * UUID value that is updated every time the engine is force merged. + */ + @Nullable + protected volatile String forceMergeUUID; + /** * When we last pruned expired tombstones from versionMap.deletes: */ private volatile long lastDeleteVersionPruneTimeMSec; - private final InternalTranslogManager translogManager; - private final OpenSearchConcurrentMergeScheduler mergeScheduler; + protected final TranslogManager translogManager; + protected final IndexWriter indexWriter; + protected final LocalCheckpointTracker localCheckpointTracker; + protected final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1); + protected final SoftDeletesPolicy softDeletesPolicy; + protected final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); - private final IndexWriter indexWriter; + @Nullable + protected final String historyUUID; + private final OpenSearchConcurrentMergeScheduler mergeScheduler; private final ExternalReaderManager externalReaderManager; private final OpenSearchReaderManager internalReaderManager; @@ -168,15 +180,12 @@ public class InternalEngine extends Engine { private final IndexThrottle throttle; - private final LocalCheckpointTracker localCheckpointTracker; - private final CombinedDeletionPolicy combinedDeletionPolicy; // How many callers are currently requesting index throttling. Currently there are only two situations where we do this: when merges // are falling behind and when writing indexing buffer to disk is too slow. 
When this is 0, there is no throttling, else we throttling // incoming indexing ops to a single thread: private final AtomicInteger throttleRequestCount = new AtomicInteger(); - private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1); private final AtomicLong maxSeenAutoIdTimestamp = new AtomicLong(-1); // max_seq_no_of_updates_or_deletes tracks the max seq_no of update or delete operations that have been processed in this engine. // An index request is considered as an update if it overwrites existing documents with the same docId in the Lucene index. @@ -189,14 +198,12 @@ public class InternalEngine extends Engine { private final CounterMetric numDocAppends = new CounterMetric(); private final CounterMetric numDocUpdates = new CounterMetric(); private final NumericDocValuesField softDeletesField = Lucene.newSoftDeletesField(); - private final SoftDeletesPolicy softDeletesPolicy; private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener; private final CompletionStatsCache completionStatsCache; private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false); private final KeyedLock noOpKeyedLock = new KeyedLock<>(); - private final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); /** * If multiple writes passed {@link InternalEngine#tryAcquireInFlightDocs(Operation, int)} but they haven't adjusted @@ -210,15 +217,6 @@ public class InternalEngine extends Engine { private final int maxDocs; - @Nullable - private final String historyUUID; - - /** - * UUID value that is updated every time the engine is force merged. - */ - @Nullable - private volatile String forceMergeUUID; - public InternalEngine(EngineConfig engineConfig) { this(engineConfig, IndexWriter.MAX_DOCS, LocalCheckpointTracker::new, TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER); } @@ -249,7 +247,7 @@ public TranslogManager translogManager() { ExternalReaderManager externalReaderManager = null; OpenSearchReaderManager internalReaderManager = null; EngineMergeScheduler scheduler = null; - InternalTranslogManager translogManagerRef = null; + TranslogManager translogManagerRef = null; boolean success = false; try { this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); @@ -280,20 +278,11 @@ public void onFailure(String reason, Exception ex) { } } }; - translogManagerRef = new InternalTranslogManager( - engineConfig.getTranslogConfig(), - engineConfig.getPrimaryTermSupplier(), - engineConfig.getGlobalCheckpointSupplier(), - translogDeletionPolicy, - shardId, - readLock, - this::getLocalCheckpointTracker, - translogUUID, - new CompositeTranslogEventListener(Arrays.asList(internalTranslogEventListener, translogEventListener), shardId), - this::ensureOpen, - engineConfig.getTranslogFactory(), - engineConfig.getStartedPrimarySupplier() + CompositeTranslogEventListener compositeTranslogEventListener = new CompositeTranslogEventListener( + Arrays.asList(internalTranslogEventListener, translogEventListener), + shardId ); + translogManagerRef = createTranslogManager(translogUUID, translogDeletionPolicy, compositeTranslogEventListener); this.translogManager = translogManagerRef; this.softDeletesPolicy = newSoftDeletesPolicy(); this.combinedDeletionPolicy = new CombinedDeletionPolicy( @@ -362,6 +351,27 @@ public void onFailure(String reason, Exception ex) { logger.trace("created new InternalEngine"); } + protected TranslogManager createTranslogManager( + String translogUUID, + TranslogDeletionPolicy translogDeletionPolicy, + 
CompositeTranslogEventListener translogEventListener + ) throws IOException { + return new InternalTranslogManager( + engineConfig.getTranslogConfig(), + engineConfig.getPrimaryTermSupplier(), + engineConfig.getGlobalCheckpointSupplier(), + translogDeletionPolicy, + shardId, + readLock, + this::getLocalCheckpointTracker, + translogUUID, + translogEventListener, + this::ensureOpen, + engineConfig.getTranslogFactory(), + engineConfig.getStartedPrimarySupplier() + ); + } + private LocalCheckpointTracker createLocalCheckpointTracker( BiFunction localCheckpointTrackerSupplier ) throws IOException { @@ -2773,7 +2783,7 @@ public Closeable acquireHistoryRetentionLock() { /** * Gets the commit data from {@link IndexWriter} as a map. */ - private static Map commitDataAsMap(final IndexWriter indexWriter) { + protected static Map commitDataAsMap(final IndexWriter indexWriter) { final Map commitData = new HashMap<>(8); for (Map.Entry entry : indexWriter.getLiveCommitData()) { commitData.put(entry.getKey(), entry.getValue()); diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index e2210217672ef..d2c81c4274ebd 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -21,7 +21,6 @@ import org.opensearch.index.translog.listener.TranslogEventListener; import org.opensearch.index.translog.transfer.TranslogUploadFailedException; -import java.io.Closeable; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BooleanSupplier; @@ -31,12 +30,12 @@ import java.util.stream.Stream; /** - * The {@link TranslogManager} implementation capable of orchestrating all read/write {@link Translog} operations while - * interfacing with the {@link org.opensearch.index.engine.InternalEngine} + * The {@link TranslogManager} implementation capable of orchestrating all read/write {@link Translog} operations for + * the {@link org.opensearch.index.engine.InternalEngine} * * @opensearch.internal */ -public class InternalTranslogManager implements TranslogManager, Closeable { +public class InternalTranslogManager implements TranslogManager { private final ReleasableLock readLock; private final LifecycleAware engineLifeCycleAware; diff --git a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java index b4aa7865570a6..7ae80f88b0595 100644 --- a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java @@ -11,6 +11,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.seqno.SequenceNumbers; import java.io.IOException; import java.util.stream.Stream; @@ -27,6 +28,9 @@ public class NoOpTranslogManager implements TranslogManager { private final Runnable ensureOpen; private final ShardId shardId; private final TranslogStats translogStats; + private final TranslogDeletionPolicy translogDeletionPolicy; + private final String translogUUID; + private final boolean skipRecoveryStep; public NoOpTranslogManager( ShardId shardId, @@ -34,12 +38,27 @@ public NoOpTranslogManager( Runnable ensureOpen, TranslogStats 
translogStats, Translog.Snapshot emptyTranslogSnapshot + ) throws IOException { + this(shardId, readLock, ensureOpen, translogStats, emptyTranslogSnapshot, "", false); + } + + public NoOpTranslogManager( + ShardId shardId, + ReleasableLock readLock, + Runnable ensureOpen, + TranslogStats translogStats, + Translog.Snapshot emptyTranslogSnapshot, + String translogUUID, + boolean skipRecoveryStep ) throws IOException { this.emptyTranslogSnapshot = emptyTranslogSnapshot; this.readLock = readLock; this.shardId = shardId; this.ensureOpen = ensureOpen; this.translogStats = translogStats; + this.translogDeletionPolicy = new DefaultTranslogDeletionPolicy(0, 0, 0); + this.translogUUID = translogUUID; + this.skipRecoveryStep = skipRecoveryStep; } @Override @@ -48,6 +67,11 @@ public void rollTranslogGeneration() throws TranslogException {} @Override public int recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long localCheckpoint, long recoverUpToSeqNo) throws IOException { + // skip translog recovery attempt when skipRecoveryStep is true + if (skipRecoveryStep) { + return 0; + } + try (ReleasableLock ignored = readLock.acquire()) { ensureOpen.run(); try (Translog.Snapshot snapshot = emptyTranslogSnapshot) { @@ -132,6 +156,42 @@ public Releasable drainSync() { @Override public Translog.TranslogGeneration getTranslogGeneration() { + return new Translog.TranslogGeneration(translogUUID, 0); + } + + @Override + public long getLastSyncedGlobalCheckpoint() { + return 0; + } + + @Override + public long getMaxSeqNo() { + return SequenceNumbers.NO_OPS_PERFORMED; + } + + @Override + public void trimUnreferencedReaders() throws IOException {} + + @Override + public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long flushThreshold) { + return false; + } + + @Override + public Exception getTragicExceptionIfClosed() { return null; } + + @Override + public TranslogDeletionPolicy getDeletionPolicy() { + return translogDeletionPolicy; + } + + @Override + public String getTranslogUUID() { + return translogUUID; + } + + @Override + public void close() throws IOException {} } diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index ffda06d8d8292..b1e88624c9906 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -899,6 +899,8 @@ public TranslogDeletionPolicy getDeletionPolicy() { return deletionPolicy; } + public static final Translog.Location EMPTY_TRANSLOG_LOCATION = new Translog.Location(0, 0, 0); + /** * Location in the translot * diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java index e1a0b7d1c1293..ec312636e7ee1 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java @@ -11,6 +11,7 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; +import java.io.Closeable; import java.io.IOException; import java.util.stream.Stream; @@ -20,7 +21,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public interface TranslogManager { +public interface TranslogManager extends Closeable { /** * Rolls the translog generation and cleans unneeded. 
@@ -142,4 +143,46 @@ public interface TranslogManager { Releasable drainSync(); Translog.TranslogGeneration getTranslogGeneration(); + + /** + * Retrieves last synced global checkpoint. + */ + long getLastSyncedGlobalCheckpoint(); + + /** + * Retrieves the max seq no. + */ + long getMaxSeqNo(); + + /** + * Trims unreferenced translog generations by asking {@link TranslogDeletionPolicy} for the minimum required + * generation. + */ + void trimUnreferencedReaders() throws IOException; + + /** + * + * @param localCheckpointOfLastCommit local checkpoint reference of last commit to translog + * @param flushThreshold threshold to flush to translog + * @return if the translog should be flushed + */ + boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long flushThreshold); + + /** + * Retrieves the underlying translog tragic exception + * @return the tragic exception + */ + Exception getTragicExceptionIfClosed(); + + /** + * Retrieves the translog deletion policy + * @return TranslogDeletionPolicy + */ + TranslogDeletionPolicy getDeletionPolicy(); + + /** + * Retrieves the translog unique identifier + * @return the uuid of the translog + */ + String getTranslogUUID(); } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java index e124adb90365b..16688feddf53c 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java @@ -13,6 +13,7 @@ import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.IngestionEngine; +import org.opensearch.index.engine.NRTReplicationEngine; import java.util.Objects; @@ -29,6 +30,10 @@ public IngestionEngineFactory(IngestionConsumerFactory ingestionConsumerFactory) @Override public Engine newReadWriteEngine(EngineConfig config) { + if (config.isReadOnlyReplica()) { + return new NRTReplicationEngine(config); + } + IngestionEngine ingestionEngine = new IngestionEngine(config, ingestionConsumerFactory); ingestionEngine.start(); return ingestionEngine; diff --git a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java index 19718384bd926..2d00bbcba0c8c 100644 --- a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java @@ -36,8 +36,9 @@ import java.util.concurrent.atomic.AtomicLong; import static org.awaitility.Awaitility.await; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; public class IngestionEngineTests extends EngineTestCase { @@ -46,6 +47,7 @@ public class IngestionEngineTests extends EngineTestCase { private IngestionEngine ingestionEngine; // the messages of the stream to ingest from private List messages; + private EngineConfig engineConfig; @Override @Before @@ -86,6 +88,7 @@ public void tearDown() throws Exception { ingestionEngineStore.close(); } super.tearDown(); + engineConfig = null; } public void testCreateEngine() throws IOException { @@ -95,7 +98,7 @@ public void testCreateEngine() throws IOException { ingestionEngine.flush(false, true); Map commitData = ingestionEngine.commitDataAsMap(); // 
verify the commit data - Assert.assertEquals(1, commitData.size()); + Assert.assertEquals(7, commitData.size()); Assert.assertEquals("2", commitData.get(StreamPoller.BATCH_START)); // verify the stored offsets @@ -120,21 +123,19 @@ public void testRecovery() throws IOException { publishData("{\"_id\":\"3\",\"_source\":{\"name\":\"john\", \"age\": 30}}"); publishData("{\"_id\":\"4\",\"_source\":{\"name\":\"jane\", \"age\": 25}}"); ingestionEngine.close(); - ingestionEngine = buildIngestionEngine(new AtomicLong(2), ingestionEngineStore, indexSettings); + ingestionEngine = buildIngestionEngine(new AtomicLong(0), ingestionEngineStore, indexSettings); waitForResults(ingestionEngine, 4); } public void testCreationFailure() throws IOException { - // Simulate an error scenario - Store mockStore = mock(Store.class); - doThrow(new IOException("Simulated IOException")).when(mockStore).readLastCommittedSegmentsInfo(); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - FakeIngestionSource.FakeIngestionConsumerFactory consumerFactory = new FakeIngestionSource.FakeIngestionConsumerFactory(messages); + Store mockStore = spy(store); + doThrow(new IOException("Simulated IOException")).when(mockStore).trimUnsafeCommits(any()); + EngineConfig engineConfig = config( indexSettings, - store, + mockStore, createTempDir(), NoMergePolicy.INSTANCE, null, @@ -156,7 +157,9 @@ public void testCreationFailure() throws IOException { private IngestionEngine buildIngestionEngine(AtomicLong globalCheckpoint, Store store, IndexSettings settings) throws IOException { FakeIngestionSource.FakeIngestionConsumerFactory consumerFactory = new FakeIngestionSource.FakeIngestionConsumerFactory(messages); - EngineConfig engineConfig = config(settings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); + if (engineConfig == null) { + engineConfig = config(settings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); + } // overwrite the config with ingestion engine settings String mapping = "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}"; MapperService mapperService = createMapperService(mapping); diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 4f04c0b08fd0a..f9a09c088095b 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -98,6 +98,7 @@ import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineTestCase; +import org.opensearch.index.engine.IngestionEngine; import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.seqno.SequenceNumbers; @@ -1378,7 +1379,9 @@ private void assertOpenTranslogReferences() throws Exception { for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { try { - if (IndexShardTestCase.getEngine(indexShard) instanceof InternalEngine) { + if (IndexShardTestCase.getEngine(indexShard) instanceof IngestionEngine) { + // no-op, as IngestionEngine does not use translog. 
+ } else if (IndexShardTestCase.getEngine(indexShard) instanceof InternalEngine) { IndexShardTestCase.getTranslog(indexShard).getDeletionPolicy().assertNoOpenTranslogRefs(); } } catch (AlreadyClosedException ok) { From b1e66b34f2f29952ac04a411267562a3dbc54976 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Thu, 27 Feb 2025 14:48:51 -0800 Subject: [PATCH 036/550] Move TSC took-time policy to guard both heap and disk tier (#17190) * Move TSC took-time policy to guard both heap and disk tier Signed-off-by: Peter Alfonsi * changelog Signed-off-by: Peter Alfonsi * spotless apply Signed-off-by: Peter Alfonsi * Addressed Sagar's comment Signed-off-by: Peter Alfonsi * Add missing javadoc Signed-off-by: Peter Alfonsi * address round 2 of comments Signed-off-by: Peter Alfonsi * Add removal notification to put() Signed-off-by: Peter Alfonsi * Fix incorrect stats hit when cache entry rejected by policy Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * Fixed more broken stats Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * Addressed more comments Signed-off-by: Peter Alfonsi * make policy rejections count as neither hit or miss Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * remove potential double-loading Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * remove removalNotification Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi --------- Signed-off-by: Peter Alfonsi Signed-off-by: Peter Alfonsi Co-authored-by: Peter Alfonsi --- CHANGELOG.md | 1 + .../common/tier/TieredSpilloverCacheIT.java | 88 ++++- .../tier/TieredSpilloverCacheStatsIT.java | 85 ++-- .../cache/common/policy/TookTimePolicy.java | 14 +- .../common/tier/TieredSpilloverCache.java | 147 ++++--- .../tier/TieredSpilloverCachePlugin.java | 2 + .../tier/TieredSpilloverCacheSettings.java | 40 +- .../common/policy/TookTimePolicyTests.java | 46 ++- .../tier/TieredSpilloverCacheTests.java | 364 +++++++++++++++--- 9 files changed, 579 insertions(+), 208 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6aa18ce0064ae..bd218393919bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) - Increase force merge threads to 1/8th of cores [#17255](https://github.com/opensearch-project/OpenSearch/pull/17255) +- TieredSpilloverCache took-time threshold now guards heap tier as well as disk tier [#17190](https://github.com/opensearch-project/OpenSearch/pull/17190) ### Deprecated diff --git a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java index f0ea21bde187e..08458cd2a054d 100644 --- a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java +++ b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheIT.java @@ -118,7 +118,7 @@ public void testSanityChecksWithIndicesRequestCache() throws InterruptedExceptio ); } - public void testWithDynamicTookTimePolicyWithMultiSegments() throws Exception { + public void testWithDynamicDiskTookTimePolicyWithMultiSegments() throws Exception { int numberOfSegments = 
getNumberOfSegments(); int onHeapCacheSizePerSegmentInBytes = 800; // Per cache entry below is around ~700 bytes, so keeping this // just a bit higher so that each segment can atleast hold 1 entry. @@ -139,12 +139,13 @@ public void testWithDynamicTookTimePolicyWithMultiSegments() throws Exception { ) .get() ); - // Set a very high value for took time policy so that no items evicted from onHeap cache are spilled + // Set a very high value for took time disk policy so that no items evicted from onHeap cache are spilled // to disk. And then hit requests so that few items are cached into cache. ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(100, TimeUnit.SECONDS) ) .build() @@ -182,12 +183,13 @@ public void testWithDynamicTookTimePolicyWithMultiSegments() throws Exception { assertEquals(0, requestCacheStats.getHitCount()); long lastEvictionSeen = requestCacheStats.getEvictions(); - // Decrease took time policy to zero so that disk cache also comes into play. Now we should be able + // Decrease disk took time policy to zero so that disk cache also comes into play. Now we should be able // to cache all entries. updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(0, TimeUnit.MILLISECONDS) ) .build() @@ -206,7 +208,7 @@ public void testWithDynamicTookTimePolicyWithMultiSegments() throws Exception { assertEquals(lastEvictionSeen, requestCacheStats.getEvictions()); } - public void testWithDynamicTookTimePolicy() throws Exception { + public void testWithDynamicHeapTookTimePolicy() throws Exception { int onHeapCacheSizeInBytes = 2000; internalCluster().startNode(Settings.builder().put(defaultSettings(onHeapCacheSizeInBytes + "b", 1)).build()); Client client = client(); @@ -224,8 +226,7 @@ public void testWithDynamicTookTimePolicy() throws Exception { ) .get() ); - // Step 1 : Set a very high value for took time policy so that no items evicted from onHeap cache are spilled - // to disk. And then hit requests so that few items are cached into cache. + // Set a high threshold for the overall cache took time policy so nothing will enter the cache. 
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( @@ -245,6 +246,57 @@ public void testWithDynamicTookTimePolicy() throws Exception { ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); long perQuerySizeInCacheInBytes = -1; + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + assertSearchResponse(resp); + } + RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index"); + assertEquals(0, requestCacheStats.getEvictions()); + } + + public void testWithDynamicDiskTookTimePolicy() throws Exception { + int onHeapCacheSizeInBytes = 2000; + internalCluster().startNode(Settings.builder().put(defaultSettings(onHeapCacheSizeInBytes + "b", 1)).build()); + Client client = client(); + assertAcked( + client.admin() + .indices() + .prepareCreate("index") + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", -1) + ) + .get() + ); + // Step 1 : Set a very high value for disk took time policy so that no items evicted from onHeap cache are spilled + // to disk. And then hit requests so that few items are cached into cache. + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( + Settings.builder() + .put( + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), + new TimeValue(100, TimeUnit.SECONDS) + ) + .build() + ); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + int numberOfIndexedItems = randomIntBetween(6, 10); + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + indexRandom(true, client.prepareIndex("index").setSource("k" + iterator, "hello" + iterator)); + } + ensureSearchable("index"); + refreshAndWaitForReplication(); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); + long perQuerySizeInCacheInBytes = -1; for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { SearchResponse resp = client.prepareSearch("index") .setRequestCache(true) @@ -282,12 +334,13 @@ public void testWithDynamicTookTimePolicy() throws Exception { assertEquals(0, requestCacheStats.getHitCount()); long lastEvictionSeen = requestCacheStats.getEvictions(); - // Step 3: Decrease took time policy to zero so that disk cache also comes into play. Now we should be able + // Step 3: Decrease disk took time policy to zero so that disk cache also comes into play. Now we should be able // to cache all entries. 
updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(0, TimeUnit.MILLISECONDS) ) .build() @@ -352,11 +405,12 @@ public void testInvalidationWithIndicesRequestCache() throws Exception { ) .get() ); - // Update took time policy to zero so that all entries are eligible to be cached on disk. + // Update disk took time policy to zero so that all entries are eligible to be cached on disk. ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(0, TimeUnit.MILLISECONDS) ) .build() @@ -437,11 +491,12 @@ public void testWithExplicitCacheClear() throws Exception { ) .get() ); - // Update took time policy to zero so that all entries are eligible to be cached on disk. + // Update disk took time policy to zero so that all entries are eligible to be cached on disk. ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(0, TimeUnit.MILLISECONDS) ) .build() @@ -512,11 +567,12 @@ public void testWithDynamicDiskCacheSetting() throws Exception { ) .get() ); - // Update took time policy to zero so that all entries are eligible to be cached on disk. + // Update disk took time policy to zero so that all entries are eligible to be cached on disk. 
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( Settings.builder() .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), new TimeValue(0, TimeUnit.MILLISECONDS) ) .build() diff --git a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java index fa10f4185521a..c72fc0d529c03 100644 --- a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java +++ b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java @@ -62,16 +62,7 @@ protected Collection> nodePlugins() { * Test aggregating by indices */ public void testIndicesLevelAggregation() throws Exception { - internalCluster().startNodes( - 1, - Settings.builder() - .put(defaultSettings(HEAP_CACHE_SIZE_STRING, 1)) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) - ) - .build() - ); + startNodesDefaultSettings(); Client client = client(); Map values = setupCacheForAggregationTests(client); @@ -115,16 +106,7 @@ public void testIndicesLevelAggregation() throws Exception { * Test aggregating by indices and tier */ public void testIndicesAndTierLevelAggregation() throws Exception { - internalCluster().startNodes( - 1, - Settings.builder() - .put(defaultSettings(HEAP_CACHE_SIZE_STRING, 1)) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) - ) - .build() - ); + startNodesDefaultSettings(); Client client = client(); Map values = setupCacheForAggregationTests(client); @@ -195,16 +177,7 @@ public void testIndicesAndTierLevelAggregation() throws Exception { * Test aggregating by tier only */ public void testTierLevelAggregation() throws Exception { - internalCluster().startNodes( - 1, - Settings.builder() - .put(defaultSettings(HEAP_CACHE_SIZE_STRING, 1)) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) - ) - .build() - ); + startNodesDefaultSettings(); Client client = client(); Map values = setupCacheForAggregationTests(client); // Get values for tiers alone and check they add correctly across indices @@ -236,16 +209,7 @@ public void testTierLevelAggregation() throws Exception { } public void testInvalidLevelsAreIgnored() throws Exception { - internalCluster().startNodes( - 1, - Settings.builder() - .put(defaultSettings(HEAP_CACHE_SIZE_STRING, getNumberOfSegments())) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) - ) - .build() - ); + startNodesDefaultSettings(); Client client = client(); Map values = setupCacheForAggregationTests(client); @@ -287,16 +251,7 @@ public void testInvalidLevelsAreIgnored() throws Exception { * Check the new stats API returns the same values as the old stats API. 
*/ public void testStatsMatchOldApi() throws Exception { - internalCluster().startNodes( - 1, - Settings.builder() - .put(defaultSettings(HEAP_CACHE_SIZE_STRING, getNumberOfSegments())) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) - ) - .build() - ); + startNodesDefaultSettings(); String index = "index"; Client client = client(); startIndex(client, index); @@ -354,7 +309,12 @@ public void testStatsWithMultipleSegments() throws Exception { .put(defaultSettings(heap_cache_size_per_segment * numberOfSegments + "B", numberOfSegments)) .put( TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(0, TimeUnit.SECONDS) + TimeValue.ZERO + ) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), + TimeValue.ZERO ) .build() ); @@ -429,6 +389,11 @@ public void testClosingShard() throws Exception { TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), new TimeValue(0, TimeUnit.SECONDS) ) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), + new TimeValue(0, TimeUnit.SECONDS) + ) .put(INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), new TimeValue(1)) .build() ); @@ -631,4 +596,22 @@ private static ImmutableCacheStatsHolder getNodeCacheStatsResult(Client client, NodeCacheStats ncs = nodeStatsResponse.getNodes().get(0).getNodeCacheStats(); return ncs.getStatsByCache(CacheType.INDICES_REQUEST_CACHE); } + + private void startNodesDefaultSettings() { + internalCluster().startNodes( + 1, + Settings.builder() + .put(defaultSettings(HEAP_CACHE_SIZE_STRING, 1)) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + TimeValue.ZERO + ) + .put( + TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + .getKey(), + TimeValue.ZERO + ) + .build() + ); + } } diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java index 4bc26803acf4c..620b5597086f4 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java @@ -13,16 +13,14 @@ package org.opensearch.cache.common.policy; -import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; import java.util.function.Function; import java.util.function.Predicate; -import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; - /** * A cache tier policy which accepts queries whose took time is greater than some threshold. * The threshold should be set to approximately the time it takes to get a result from the cache tier. 
@@ -46,20 +44,20 @@ public class TookTimePolicy implements Predicate { * @param threshold the threshold * @param cachedResultParser the function providing policy values * @param clusterSettings cluster settings - * @param cacheType cache type + * @param targetSetting the cluster setting to register a consumer with */ public TookTimePolicy( TimeValue threshold, Function cachedResultParser, ClusterSettings clusterSettings, - CacheType cacheType + Setting targetSetting ) { if (threshold.compareTo(TimeValue.ZERO) < 0) { throw new IllegalArgumentException("Threshold for TookTimePolicy must be >= 0ms but was " + threshold.getStringRep()); } this.threshold = threshold; this.cachedResultParser = cachedResultParser; - clusterSettings.addSettingsUpdateConsumer(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType), this::setThreshold); + clusterSettings.addSettingsUpdateConsumer(targetSetting, this::setThreshold); } private void setThreshold(TimeValue threshold) { @@ -72,6 +70,10 @@ private void setThreshold(TimeValue threshold) { * @return whether to admit the data */ public boolean test(V data) { + if (threshold.equals(TimeValue.ZERO)) { + // Skip parsing the took time if this threshold is zero. + return true; + } long tookTimeNanos; try { tookTimeNanos = cachedResultParser.apply(data).getTookTimeNanos(); diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 9879235812377..d968e61cffcff 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -53,6 +53,8 @@ import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_SIZE; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_SEGMENTS; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK; import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP; import static org.opensearch.common.cache.settings.CacheSettings.INVALID_SEGMENT_COUNT_EXCEPTION_MESSAGE; @@ -145,9 +147,12 @@ static class TieredSpilloverCacheSegment implements ICache { ReleasableLock writeLock = new ReleasableLock(readWriteLock.writeLock()); private final Map, TierInfo> caches; - + // Policies guarding access to the cache overall. private final List> policies; + // Policies guarding access to the disk tier. + private final List> diskPolicies; + private final TieredSpilloverCacheStatsHolder statsHolder; private final long onHeapCacheMaxWeight; @@ -157,7 +162,7 @@ static class TieredSpilloverCacheSegment implements ICache { * This map is used to handle concurrent requests for same key in computeIfAbsent() to ensure we load the value * only once. 
*/ - Map, CompletableFuture, V>>> completableFutureMap = new ConcurrentHashMap<>(); + Map, CompletableFuture, V>, Boolean>>> completableFutureMap = new ConcurrentHashMap<>(); TieredSpilloverCacheSegment( Builder builder, @@ -220,7 +225,8 @@ static class TieredSpilloverCacheSegment implements ICache { cacheListMap.put(onHeapCache, new TierInfo(true, TIER_DIMENSION_VALUE_ON_HEAP)); cacheListMap.put(diskCache, new TierInfo(isDiskCacheEnabled, TIER_DIMENSION_VALUE_DISK)); this.caches = Collections.synchronizedMap(cacheListMap); - this.policies = builder.policies; // Will never be null; builder initializes it to an empty list + this.policies = builder.policies; + this.diskPolicies = builder.diskPolicies; // Will never be null; builder initializes it to an empty list this.onHeapCacheMaxWeight = onHeapCacheSizeInBytes; this.diskCacheMaxWeight = diskCacheSizeInBytes; } @@ -255,18 +261,19 @@ public V get(ICacheKey key) { public void put(ICacheKey key, V value) { // First check in case the key is already present in either of tiers. Tuple cacheValueTuple = getValueFromTieredCache(true).apply(key); - if (cacheValueTuple == null) { - // In case it is not present in any tier, put it inside onHeap cache by default. - try (ReleasableLock ignore = writeLock.acquire()) { - onHeapCache.put(key, value); - } - updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); - } else { - // Put it inside desired tier. - try (ReleasableLock ignore = writeLock.acquire()) { - for (Map.Entry, TierInfo> entry : this.caches.entrySet()) { - if (cacheValueTuple.v2().equals(entry.getValue().tierName)) { - entry.getKey().put(key, value); + if (evaluatePoliciesList(value, policies)) { + if (cacheValueTuple == null) { + // In case it is not present in any tier, put it inside onHeap cache by default. + try (ReleasableLock ignore = writeLock.acquire()) { + onHeapCache.put(key, value); + } + updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); + } else { + try (ReleasableLock ignore = writeLock.acquire()) { + for (Map.Entry, TierInfo> entry : this.caches.entrySet()) { + if (cacheValueTuple.v2().equals(entry.getValue().tierName)) { + entry.getKey().put(key, value); + } } } updateStatsOnPut(cacheValueTuple.v2(), key, value); @@ -281,7 +288,7 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> // getValueFromTieredCache(), // we will see all misses. Instead, handle stats in computeIfAbsent(). Tuple cacheValueTuple; - CompletableFuture, V>> future = null; + CompletableFuture, V>, Boolean>> future = null; try (ReleasableLock ignore = readLock.acquire()) { cacheValueTuple = getValueFromTieredCache(false).apply(key); if (cacheValueTuple == null) { @@ -297,22 +304,25 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> // Add the value to the onHeap cache. We are calling computeIfAbsent which does another get inside. // This is needed as there can be many requests for the same key at the same time and we only want to load // the value once. - V value = compute(key, loader, future); - // Handle stats - if (loader.isLoaded()) { - // The value was just computed and added to the cache by this thread. 
Register a miss for the heap cache, and the disk - // cache - // if present - updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); - statsHolder.incrementMisses(heapDimensionValues); - if (caches.get(diskCache).isEnabled()) { - statsHolder.incrementMisses(diskDimensionValues); + Tuple> computedValueTuple = compute(key, loader, future); + boolean wasCacheMiss = computedValueTuple.v2().v1(); + boolean wasRejectedByPolicy = computedValueTuple.v2().v2(); + // If the value was rejected by policy, it counts as neither a hit or miss. + if (!wasRejectedByPolicy) { + // Handle stats + if (wasCacheMiss) { + // The value was just computed and added to the cache by this thread. + // Register a miss for the heap cache, and the disk cache if present + statsHolder.incrementMisses(heapDimensionValues); + if (caches.get(diskCache).isEnabled()) { + statsHolder.incrementMisses(diskDimensionValues); + } + } else { + // Another thread requesting this key already loaded the value. Register a hit for the heap cache + statsHolder.incrementHits(heapDimensionValues); } - } else { - // Another thread requesting this key already loaded the value. Register a hit for the heap cache - statsHolder.incrementHits(heapDimensionValues); } - return value; + return computedValueTuple.v1(); } else { // Handle stats for an initial hit from getValueFromTieredCache() if (cacheValueTuple.v2().equals(TIER_DIMENSION_VALUE_ON_HEAP)) { @@ -327,20 +337,33 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> return cacheValueTuple.v1(); } - private V compute(ICacheKey key, LoadAwareCacheLoader, V> loader, CompletableFuture, V>> future) - throws Exception { - // Handler to handle results post-processing. Takes a tuple or exception as an input and returns - // the value. Also before returning value, puts the value in cache. - BiFunction, V>, Throwable, Void> handler = (pair, ex) -> { - if (pair != null) { + private Tuple> compute( + ICacheKey key, + LoadAwareCacheLoader, V> loader, + CompletableFuture, V>, Boolean>> future + ) throws Exception { + // Handler to handle results post-processing. Takes a Tuple, boolean>, where the boolean represents whether + // this key/value pair was rejected by the policies, + // or exception as an input and returns the value. Also before returning value, puts the value in cache if accepted by policies. + boolean wasCacheMiss = false; + boolean wasRejectedByPolicy = false; + BiFunction, V>, Boolean>, Throwable, Void> handler = (pairInfo, ex) -> { + Tuple, V> pair = pairInfo.v1(); + boolean rejectedByPolicy = pairInfo.v2(); + if (pair != null && !rejectedByPolicy) { + boolean didAddToCache = false; try (ReleasableLock ignore = writeLock.acquire()) { onHeapCache.put(pair.v1(), pair.v2()); + didAddToCache = true; } catch (Exception e) { // TODO: Catch specific exceptions to know whether this resulted from cache or underlying removal // listeners/stats. Needs better exception handling at underlying layers.For now swallowing // exception. 
logger.warn("Exception occurred while putting item onto heap cache", e); } + if (didAddToCache) { + updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, pair.v2()); + } } else { if (ex != null) { logger.warn("Exception occurred while trying to compute the value", ex); @@ -364,16 +387,20 @@ private V compute(ICacheKey key, LoadAwareCacheLoader, V> loader future.completeExceptionally(npe); throw new ExecutionException(npe); } else { - future.complete(new Tuple<>(key, value)); + wasRejectedByPolicy = !evaluatePoliciesList(value, policies); + future.complete(new Tuple<>(new Tuple<>(key, value), wasRejectedByPolicy)); + wasCacheMiss = !wasRejectedByPolicy; } } else { try { - value = future.get().v2(); + Tuple, V>, Boolean> futureTuple = future.get(); + wasRejectedByPolicy = futureTuple.v2(); + value = futureTuple.v1().v2(); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } - return value; + return new Tuple<>(value, new Tuple<>(wasCacheMiss, wasRejectedByPolicy)); } @Override @@ -442,7 +469,9 @@ void handleRemovalFromHeapTier(RemovalNotification, V> notification boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()); boolean countEvictionTowardsTotal = false; // Don't count this eviction towards the cache's total if it ends up in the disk tier boolean exceptionOccurredOnDiskCachePut = false; - boolean canCacheOnDisk = caches.get(diskCache).isEnabled() && wasEvicted && evaluatePolicies(notification.getValue()); + boolean canCacheOnDisk = caches.get(diskCache).isEnabled() + && wasEvicted + && evaluatePoliciesList(notification.getValue(), diskPolicies); if (canCacheOnDisk) { try (ReleasableLock ignore = writeLock.acquire()) { diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats @@ -465,8 +494,8 @@ void handleRemovalFromHeapTier(RemovalNotification, V> notification updateStatsOnRemoval(TIER_DIMENSION_VALUE_ON_HEAP, wasEvicted, key, notification.getValue(), countEvictionTowardsTotal); } - boolean evaluatePolicies(V value) { - for (Predicate policy : policies) { + boolean evaluatePoliciesList(V value, List> policiesList) { + for (Predicate policy : policiesList) { if (!policy.test(value)) { return false; } @@ -822,8 +851,8 @@ public ICache create(CacheConfig config, CacheType cacheType, } ICache.Factory diskCacheFactory = cacheFactories.get(diskCacheStoreName); - TimeValue diskPolicyThreshold = TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType) - .get(settings); + TimeValue tookTimePolicyThreshold = TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType).get(settings); + TimeValue tookTimeDiskPolicyThreshold = TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType).get(settings); Function cachedResultParser = Objects.requireNonNull( config.getCachedResultParser(), "Cached result parser fn can't be null" @@ -849,7 +878,22 @@ public ICache create(CacheConfig config, CacheType cacheType, .setCacheConfig(config) .setCacheType(cacheType) .setNumberOfSegments(numberOfSegments) - .addPolicy(new TookTimePolicy(diskPolicyThreshold, cachedResultParser, config.getClusterSettings(), cacheType)) + .addPolicy( + new TookTimePolicy<>( + tookTimePolicyThreshold, + cachedResultParser, + config.getClusterSettings(), + TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType) + ) + ) + .addDiskPolicy( + new TookTimePolicy<>( + tookTimeDiskPolicyThreshold, + cachedResultParser, + config.getClusterSettings(), + TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType) + ) + ) 
.setOnHeapCacheSizeInBytes(onHeapCacheSize) .setDiskCacheSize(diskCacheSize) .build(); @@ -873,7 +917,8 @@ public static class Builder { private CacheConfig cacheConfig; private CacheType cacheType; private Map cacheFactories; - private final ArrayList> policies = new ArrayList<>(); + private final List> policies = new ArrayList<>(); + private final List> diskPolicies = new ArrayList<>(); private int numberOfSegments; private long onHeapCacheSizeInBytes; @@ -945,7 +990,7 @@ public Builder setCacheFactories(Map cacheFactorie } /** - * Set a cache policy to be used to limit access to this cache's disk tier. + * Set a cache policy to be used to limit access to this cache. * @param policy the policy * @return builder */ @@ -955,12 +1000,12 @@ public Builder addPolicy(Predicate policy) { } /** - * Set multiple policies to be used to limit access to this cache's disk tier. - * @param policies the policies + * Set a cache policy to be used to limit access to this cache's disk tier. + * @param diskPolicy the policy * @return builder */ - public Builder addPolicies(List> policies) { - this.policies.addAll(policies); + public Builder addDiskPolicy(Predicate diskPolicy) { + this.diskPolicies.add(diskPolicy); return this; } diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java index bf522b42b70ca..d1d033fae8cd2 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java @@ -21,6 +21,7 @@ import java.util.Map; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; /** @@ -62,6 +63,7 @@ public List> getSettings() { TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) ); settingList.add(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType)); + settingList.add(TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType)); if (FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings)) { settingList.add(DISK_CACHE_ENABLED_SETTING_MAP.get(cacheType)); } diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java index 31dc1795134e4..790e2ead729fe 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java @@ -38,6 +38,16 @@ public class TieredSpilloverCacheSettings { */ public static final long MIN_DISK_CACHE_SIZE_IN_BYTES = 10485760L; + /** + * The default took time threshold for a value to enter the heap tier of the cache, and therefore to enter the cache at all. + */ + public static final TimeValue DEFAULT_TOOK_TIME_THRESHOLD = TimeValue.ZERO; + + /** + * The default took time threshold for a value to enter the disk tier of the cache. 
+ */ + public static final TimeValue DEFAULT_TOOK_TIME_DISK_THRESHOLD = new TimeValue(10, TimeUnit.MILLISECONDS); + /** * Setting which defines the onHeap cache store to be used in TieredSpilloverCache. * @@ -109,13 +119,27 @@ public class TieredSpilloverCacheSettings { ); /** - * Setting defining the minimum took time for a query to be allowed into the disk cache. + * Setting defining the minimum took time for a query to be allowed in the cache. + */ + private static final Setting.AffixSetting TIERED_SPILLOVER_TOOK_TIME_THRESHOLD = Setting.suffixKeySetting( + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".policies.took_time.threshold", + (key) -> Setting.timeSetting( + key, + DEFAULT_TOOK_TIME_THRESHOLD, + TimeValue.ZERO, // Minimum value for this setting + NodeScope, + Setting.Property.Dynamic + ) + ); + + /** + * Setting defining the minimum took time for a query to be allowed in the disk tier of the cache. */ private static final Setting.AffixSetting TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD = Setting.suffixKeySetting( TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.policies.took_time.threshold", (key) -> Setting.timeSetting( key, - new TimeValue(10, TimeUnit.MILLISECONDS), // Default value for this setting + DEFAULT_TOOK_TIME_DISK_THRESHOLD, TimeValue.ZERO, // Minimum value for this setting NodeScope, Setting.Property.Dynamic @@ -128,6 +152,12 @@ public class TieredSpilloverCacheSettings { */ public static final Map> TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; + /** + * Stores took time policy settings for the disk tiers of various cache types as these are dynamic so that can be registered and + * retrieved accordingly. + */ + public static final Map> TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP; + /** * Stores disk cache enabled settings for various cache types as these are dynamic so that can be registered and * retrieved accordingly. 
@@ -139,9 +169,14 @@ public class TieredSpilloverCacheSettings { */ static { Map> concreteTookTimePolicySettingMap = new HashMap<>(); + Map> concreteDiskTookTimePolicySettingMap = new HashMap<>(); Map> diskCacheSettingMap = new HashMap<>(); for (CacheType cacheType : CacheType.values()) { concreteTookTimePolicySettingMap.put( + cacheType, + TIERED_SPILLOVER_TOOK_TIME_THRESHOLD.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) + ); + concreteDiskTookTimePolicySettingMap.put( cacheType, TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) ); @@ -151,6 +186,7 @@ public class TieredSpilloverCacheSettings { ); } TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP = concreteTookTimePolicySettingMap; + TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP = concreteDiskTookTimePolicySettingMap; DISK_CACHE_ENABLED_SETTING_MAP = diskCacheSettingMap; } diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java index 000067280e50d..535274b30f2d9 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java @@ -11,7 +11,6 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; -import org.opensearch.common.Randomness; import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -20,7 +19,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.query.QuerySearchResult; import org.opensearch.test.OpenSearchTestCase; @@ -28,7 +26,7 @@ import java.io.IOException; import java.util.HashSet; -import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; @@ -52,7 +50,12 @@ public void setup() { } private TookTimePolicy getTookTimePolicy(TimeValue threshold) { - return new TookTimePolicy<>(threshold, transformationFunction, clusterSettings, CacheType.INDICES_REQUEST_CACHE); + return new TookTimePolicy<>( + threshold, + transformationFunction, + clusterSettings, + TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + ); } public void testTookTimePolicy() throws Exception { @@ -75,17 +78,31 @@ public void testTookTimePolicy() throws Exception { assertTrue(longResult); } - public void testNegativeOneInput() throws Exception { - // PolicyValues with -1 took time can be passed to this policy if we shouldn't accept it for whatever reason - TookTimePolicy tookTimePolicy = getTookTimePolicy(TimeValue.ZERO); - BytesReference minusOne = getValidPolicyInput(-1L); - assertFalse(tookTimePolicy.test(minusOne)); - } - public void testInvalidThreshold() throws Exception { assertThrows(IllegalArgumentException.class, () -> getTookTimePolicy(TimeValue.MINUS_ONE)); } + public void testZeroThresholdSkipsCheck() throws Exception { + AtomicInteger numChecksRun = new AtomicInteger(); + Function 
dummyTransformationFunction = (data) -> { + numChecksRun.incrementAndGet(); + try { + return CachedQueryResult.getPolicyValues(data); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + TookTimePolicy policy = new TookTimePolicy<>( + TimeValue.ZERO, + dummyTransformationFunction, + clusterSettings, + TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE) + ); + BytesReference minusOne = getValidPolicyInput(-1L); + assertTrue(policy.test(minusOne)); + assertEquals(0, numChecksRun.get()); + } + private BytesReference getValidPolicyInput(Long tookTimeNanos) throws IOException { // When it's used in the cache, the policy will receive BytesReferences which come from // serializing a CachedQueryResult. @@ -109,11 +126,4 @@ private QuerySearchResult getQSR() { ); return mockQSR; } - - private void writeRandomBytes(StreamOutput out, int numBytes) throws IOException { - Random rand = Randomness.get(); - byte[] bytes = new byte[numBytes]; - rand.nextBytes(bytes); - out.writeBytes(bytes); - } } diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index 494534ac74c9f..c74eb371709f6 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -24,6 +24,7 @@ import org.opensearch.common.cache.store.OpenSearchOnHeapCache; import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -57,10 +58,12 @@ import java.util.function.Predicate; import static org.opensearch.cache.common.tier.TieredSpilloverCache.ZERO_SEGMENT_COUNT_EXCEPTION_MESSAGE; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DEFAULT_TOOK_TIME_DISK_THRESHOLD; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.MIN_DISK_CACHE_SIZE_IN_BYTES; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_SEGMENTS; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_NAME; import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK; @@ -83,6 +86,7 @@ public void setup() { Settings settings = Settings.EMPTY; clusterSettings = new ClusterSettings(settings, new HashSet<>()); clusterSettings.registerSetting(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE)); + clusterSettings.registerSetting(TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE)); 
clusterSettings.registerSetting(DISK_CACHE_ENABLED_SETTING_MAP.get(CacheType.INDICES_REQUEST_CACHE)); } @@ -191,8 +195,8 @@ public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .build(), @@ -291,8 +295,8 @@ public void testComputeIfAbsentWithSegmentedCache() throws Exception { .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .setSegmentCount(numberOfSegments) @@ -1155,6 +1159,7 @@ public void testComputeIfAbsentWithOnHeapCacheThrowingExceptionOnPut() throws Ex mockDiskCacheFactory, cacheConfig, null, + null, removalListener, 1, onHeapCacheSize * keyValueSize, @@ -1202,6 +1207,7 @@ public void testComputeIfAbsentWithDiskCacheThrowingExceptionOnPut() throws Exce mockDiskCacheFactory, cacheConfig, null, + null, removalListener, 1, onHeapCacheSize * keyValueSize, @@ -1356,14 +1362,13 @@ public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exceptio } public void testDiskTierPolicies() throws Exception { - // For policy function, allow if what it receives starts with "a" and string is even length - ArrayList> policies = new ArrayList<>(); - policies.add(new AllowFirstLetterA()); - policies.add(new AllowEvenLengths()); + // For disk policy function, allow if what it receives starts with "a" and string is even length + Tuple>, Map>> setupTuple = setupPoliciesTest(); + List> diskPolicies = setupTuple.v1(); int keyValueSize = 50; MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); - TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( keyValueSize, keyValueSize * 100, removalListener, @@ -1376,35 +1381,22 @@ public void testDiskTierPolicies() throws Exception { ) .build(), 0, - policies, + diskPolicies, 1 ); - Map keyValuePairs = new HashMap<>(); - Map expectedOutputs = new HashMap<>(); - keyValuePairs.put("key1", "abcd"); - expectedOutputs.put("key1", true); - keyValuePairs.put("key2", "abcde"); - expectedOutputs.put("key2", false); - keyValuePairs.put("key3", "bbc"); - expectedOutputs.put("key3", false); - keyValuePairs.put("key4", "ab"); - expectedOutputs.put("key4", true); - keyValuePairs.put("key5", ""); - expectedOutputs.put("key5", false); - + Map> keyValuePairs = setupTuple.v2(); LoadAwareCacheLoader, String> loader = getLoadAwareCacheLoader(keyValuePairs); - int expectedEvictions = 0; for (String key : keyValuePairs.keySet()) { ICacheKey iCacheKey = getICacheKey(key); - Boolean 
expectedOutput = expectedOutputs.get(key); + Boolean expectedOutput = keyValuePairs.get(key).v2(); String value = tieredSpilloverCache.computeIfAbsent(iCacheKey, loader); - assertEquals(keyValuePairs.get(key), value); + assertEquals(keyValuePairs.get(key).v1(), value); String result = tieredSpilloverCache.get(iCacheKey); if (expectedOutput) { // Should retrieve from disk tier if it was accepted - assertEquals(keyValuePairs.get(key), result); + assertEquals(keyValuePairs.get(key).v1(), result); } else { // Should miss as heap tier size = 0 and the policy rejected it assertNull(result); @@ -1419,19 +1411,70 @@ public void testDiskTierPolicies() throws Exception { assertEquals(expectedEvictions, getTotalStatsSnapshot(tieredSpilloverCache).getEvictions()); } - public void testTookTimePolicyFromFactory() throws Exception { + private Tuple>, Map>> setupPoliciesTest() { + ArrayList> policies = new ArrayList<>(); + policies.add(new AllowFirstLetterA()); + policies.add(new AllowEvenLengths()); + + // Map from key to tuple of (value, whether we expect it to be admitted by policy) + Map> keyValuePairs = new HashMap<>(); + keyValuePairs.put("key1", new Tuple<>("abcd", true)); + keyValuePairs.put("key2", new Tuple<>("abcde", false)); + keyValuePairs.put("key3", new Tuple<>("bbc", false)); + keyValuePairs.put("key4", new Tuple<>("ab", true)); + keyValuePairs.put("key5", new Tuple<>("", false)); + return new Tuple<>(policies, keyValuePairs); + } + + public void testTookTimePoliciesFromFactory() throws Exception { // Mock took time by passing this map to the policy info wrapper fn // The policy inspects values, not keys, so this is a map from values -> took time + + long cacheThresholdNanos = 2_000_000L; + long diskThresholdNanos = 11_000_000L; Map tookTimeMap = new HashMap<>(); - tookTimeMap.put("a", 10_000_000L); + tookTimeMap.put("a", diskThresholdNanos); tookTimeMap.put("b", 0L); - tookTimeMap.put("c", 99_999_999L); + tookTimeMap.put("c", diskThresholdNanos * 3); tookTimeMap.put("d", null); tookTimeMap.put("e", -1L); - tookTimeMap.put("f", 8_888_888L); - long timeValueThresholdNanos = 10_000_000L; - - Map keyValueMap = Map.of("A", "a", "B", "b", "C", "c", "D", "d", "E", "e", "F", "f"); + tookTimeMap.put("f", cacheThresholdNanos * 2); + tookTimeMap.put("g", cacheThresholdNanos - 1); + assertTrue(cacheThresholdNanos * 2 < diskThresholdNanos); + + Map keyValueMap = Map.of("A", "a", "B", "b", "C", "c", "D", "d", "E", "e", "F", "f", "G", "g"); + Map expectedInHeapTierMap = Map.of( + "A", + true, + "B", + false, + "C", + true, + "D", + false, + "E", + false, + "F", + true, + "G", + false + ); + Map expectedInDiskTierMap = Map.of( + "A", + true, + "B", + false, + "C", + true, + "D", + false, + "E", + false, + "F", + false, + "G", + false + ); // Most of setup duplicated from testComputeIfAbsentWithFactoryBasedCacheCreation() int onHeapCacheSize = randomIntBetween(tookTimeMap.size() + 1, tookTimeMap.size() + 30); @@ -1460,10 +1503,9 @@ public void testTookTimePolicyFromFactory() throws Exception { ).getKey(), onHeapCacheSize * keyValueSize + "b" ) - .put( - TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), - new TimeValue(timeValueThresholdNanos / 1_000_000) - ) + // Initialize the settings to some other value, so we can demonstrate the updating logic works correctly. 
+ .put(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), TimeValue.ZERO) + .put(TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), TimeValue.ZERO) .put(TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()).getKey(), 1) .build(); @@ -1497,28 +1539,57 @@ public CachedQueryResult.PolicyValues apply(String s) { TieredSpilloverCache tieredSpilloverCache = (TieredSpilloverCache) tieredSpilloverICache; - // First add all our values to the on heap cache - for (String key : tookTimeMap.keySet()) { - tieredSpilloverCache.computeIfAbsent(getICacheKey(key), getLoadAwareCacheLoader(keyValueMap)); + // Change setting values to the target values to show both updates work as expected. + clusterSettings.applySettings( + Settings.builder() + .put( + TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(cacheThresholdNanos / 1_000_000) + ) + .put( + TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(diskThresholdNanos / 1_000_000) + ) + .build() + ); + + Map> loaderMap = new HashMap<>(); + for (String key : keyValueMap.keySet()) { + // The boolean here is not needed, just to fit with the get loader method + loaderMap.put(key, new Tuple<>(keyValueMap.get(key), false)); + } + LoadAwareCacheLoader, String> loader = getLoadAwareCacheLoader(loaderMap); + // First check whether keys respect the heap tier threshold. + int expectedKeys = 0; + for (String key : keyValueMap.keySet()) { + tieredSpilloverCache.computeIfAbsent(getICacheKey(key), loader); + if (expectedInHeapTierMap.get(key)) { + expectedKeys++; + } } - assertEquals(tookTimeMap.size(), tieredSpilloverCache.count()); + assertEquals(0, removalListener.evictionsMetric.count()); + assertEquals(0, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(expectedKeys, tieredSpilloverCache.count()); - // Ensure all these keys get evicted from the on heap tier by adding > heap tier size worth of random keys (this works as we have 1 - // segment) + // Ensure all these keys get evicted from the on heap tier by adding > heap tier size worth of random keys + // (this works as we have 1 segment). 
Set heap threshold to 0 to ensure random keys can all enter + clusterSettings.applySettings( + Settings.builder() + .put(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), TimeValue.ZERO) + .build() + ); for (int i = 0; i < onHeapCacheSize; i++) { - tieredSpilloverCache.computeIfAbsent(getICacheKey(UUID.randomUUID().toString()), getLoadAwareCacheLoader(keyValueMap)); + tieredSpilloverCache.computeIfAbsent(getICacheKey(UUID.randomUUID().toString()), getLoadAwareCacheLoader()); } - for (String key : tookTimeMap.keySet()) { + for (String key : keyValueMap.keySet()) { ICacheKey iCacheKey = getICacheKey(key); assertNull(tieredSpilloverCache.getTieredCacheSegment(iCacheKey).getOnHeapCache().get(iCacheKey)); } // Now the original keys should be in the disk tier if the policy allows them, or misses if not - for (String key : tookTimeMap.keySet()) { + for (String key : keyValueMap.keySet()) { String computedValue = tieredSpilloverCache.get(getICacheKey(key)); - String mapValue = keyValueMap.get(key); - Long tookTime = tookTimeMap.get(mapValue); - if (tookTime != null && tookTime > timeValueThresholdNanos) { + if (expectedInDiskTierMap.get(key)) { // expect a hit assertNotNull(computedValue); } else { @@ -1543,6 +1614,139 @@ public void testMinimumThresholdSettingValue() throws Exception { assertEquals(validDuration, concreteSetting.get(validSettings)); } + public void testEntryPoliciesWithPut() throws Exception { + Tuple>, Map>> setupTuple = setupPoliciesTest(); + List> policies = setupTuple.v1(); + Map> keyValuePairs = setupTuple.v2(); + + int keyValueSize = 50; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( + keyValueSize, + keyValueSize * 100, + removalListener, + Settings.builder() + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + keyValueSize * keyValuePairs.size() + 1 + "b" + ) + .build(), + 0, + policies, + null, + 1 + ); + + int expectedKeys = 0; + for (String key : keyValuePairs.keySet()) { + ICacheKey iCacheKey = getICacheKey(key); + tieredSpilloverCache.put(iCacheKey, keyValuePairs.get(key).v1()); + Boolean expectedOutput = keyValuePairs.get(key).v2(); + String result = tieredSpilloverCache.get(iCacheKey); + if (expectedOutput) { + // Should retrieve from heap tier if it was accepted + assertEquals(keyValuePairs.get(key).v1(), result); + expectedKeys++; + } else { + // Should miss as the policy rejected it + assertNull(result); + } + } + + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(expectedKeys, getTotalStatsSnapshot(tieredSpilloverCache).getItems()); + assertEquals(0, removalListener.evictionsMetric.count()); + } + + public void testEntryPoliciesConcurrentlyWithComputeIfAbsent() throws Exception { + Tuple>, Map>> setupTuple = setupPoliciesTest(); + List> policies = setupTuple.v1(); + Map> keyValuePairs = setupTuple.v2(); + + int keyValueSize = 50; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + LoadAwareCacheLoader, String> loader = getLoadAwareCacheLoader(keyValuePairs); + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( + keyValueSize, + keyValueSize * 100, + removalListener, + Settings.builder() + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace( 
+ CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + keyValueSize * keyValuePairs.size() + 1 + "b" + ) + .build(), + 0, + policies, + null, + 1 + ); + + // To test concurrently, run for each key multiple times in parallel threads + int numRepetitionsPerKey = 10; + int numThreads = keyValuePairs.size() * numRepetitionsPerKey; + + Thread[] threads = new Thread[numThreads]; + Phaser phaser = new Phaser(numThreads + 1); + CountDownLatch countDownLatch = new CountDownLatch(numThreads); + + // Get number of keys we expect to enter the cache + int expectedKeys = 0; + for (String key : keyValuePairs.keySet()) { + Boolean expectedOutput = keyValuePairs.get(key).v2(); + if (expectedOutput) { + expectedKeys++; + } + } + + int threadNumber = 0; + for (String key : keyValuePairs.keySet()) { + for (int j = 0; j < numRepetitionsPerKey; j++) { + threads[threadNumber] = new Thread(() -> { + try { + phaser.arriveAndAwaitAdvance(); + ICacheKey iCacheKey = getICacheKey(key); + tieredSpilloverCache.computeIfAbsent(iCacheKey, loader); + } catch (Exception ignored) {} finally { + countDownLatch.countDown(); + } + }); + threads[threadNumber].start(); + threadNumber++; + } + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(expectedKeys, getItemsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + // We should have (numRepetitionsPerKey - 1) * (expectedKeys) hits + assertEquals((numRepetitionsPerKey - 1) * expectedKeys, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + // We should have 1 miss for each accepted key. Rejected keys should not cause misses. + assertEquals(expectedKeys, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + + for (String key : keyValuePairs.keySet()) { + ICacheKey iCacheKey = getICacheKey(key); + String result = tieredSpilloverCache.get(iCacheKey); + Boolean expectedInCache = keyValuePairs.get(key).v2(); + if (expectedInCache) { + // Should retrieve from heap tier if it was accepted + assertEquals(keyValuePairs.get(key).v1(), result); + } else { + // Should miss as the policy rejected it + assertNull(result); + } + } + + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(expectedKeys, getTotalStatsSnapshot(tieredSpilloverCache).getItems()); + assertEquals(0, removalListener.evictionsMetric.count()); + } + public void testPutWithDiskCacheDisabledSetting() throws Exception { int onHeapCacheSize = randomIntBetween(10, 30); int diskCacheSize = randomIntBetween(300, 500); @@ -1972,8 +2176,8 @@ public void testWithInvalidSegmentNumber() throws Exception { .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .build(), @@ -2037,8 +2241,8 @@ public void testWithVeryLowDiskCacheSize() throws Exception { .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new 
CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .build(), @@ -2096,8 +2300,8 @@ public void testTieredCacheDefaultSegmentCount() { .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .build(), @@ -2234,8 +2438,8 @@ public void testSegmentSizesWhenUsingFactory() { .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) - .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken - // 20_000_000 ns = 20 ms to compute + // Values will always appear to have taken 2x the took time threshold to compute, so they will be admitted + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(DEFAULT_TOOK_TIME_DISK_THRESHOLD.getNanos() * 2)) .setClusterSettings(clusterSettings) .setStoragePath(storagePath) .build(), @@ -2281,6 +2485,7 @@ public void testSegmentSizesWhenNotUsingFactory() { new MockDiskCache.MockDiskCacheFactory(0, diskSizeFromImplSetting, true, keyValueSize), cacheConfig, null, + null, removalListener, numSegments, expectedHeapSize, @@ -2351,14 +2556,14 @@ public boolean isLoaded() { }; } - private LoadAwareCacheLoader, String> getLoadAwareCacheLoader(Map keyValueMap) { + private LoadAwareCacheLoader, String> getLoadAwareCacheLoader(Map> keyValueMap) { return new LoadAwareCacheLoader<>() { boolean isLoaded = false; @Override public String load(ICacheKey key) { isLoaded = true; - String mapValue = keyValueMap.get(key.key); + String mapValue = keyValueMap.get(key.key).v1(); if (mapValue == null) { mapValue = UUID.randomUUID().toString(); } @@ -2377,6 +2582,7 @@ private TieredSpilloverCache getTieredSpilloverCache( ICache.Factory mockDiskCacheFactory, CacheConfig cacheConfig, List> policies, + List> diskPolicies, RemovalListener, String> removalListener, int numberOfSegments, long onHeapCacheSizeInBytes, @@ -2393,7 +2599,14 @@ private TieredSpilloverCache getTieredSpilloverCache( .setOnHeapCacheSizeInBytes(onHeapCacheSizeInBytes) .setCacheConfig(cacheConfig); if (policies != null) { - builder.addPolicies(policies); + for (Predicate policy : policies) { + builder.addPolicy(policy); + } + } + if (diskPolicies != null) { + for (Predicate diskPolicy : diskPolicies) { + builder.addDiskPolicy(diskPolicy); + } } return builder.build(); } @@ -2406,7 +2619,7 @@ private TieredSpilloverCache initializeTieredSpilloverCache( long diskDeliberateDelay ) { - return intializeTieredSpilloverCache(keyValueSize, diskCacheSize, removalListener, settings, diskDeliberateDelay, null, 256); + return initializeTieredSpilloverCache(keyValueSize, diskCacheSize, removalListener, settings, diskDeliberateDelay, null, 256); } private TieredSpilloverCache 
initializeTieredSpilloverCache( @@ -2418,7 +2631,7 @@ private TieredSpilloverCache initializeTieredSpilloverCache( int numberOfSegments ) { - return intializeTieredSpilloverCache( + return initializeTieredSpilloverCache( keyValueSize, diskCacheSize, removalListener, @@ -2429,13 +2642,35 @@ private TieredSpilloverCache initializeTieredSpilloverCache( ); } - private TieredSpilloverCache intializeTieredSpilloverCache( + private TieredSpilloverCache initializeTieredSpilloverCache( + int keyValueSize, + int diskCacheSize, + RemovalListener, String> removalListener, + Settings settings, + long diskDeliberateDelay, + List> diskPolicies, + int numberOfSegments + ) { + return initializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + settings, + diskDeliberateDelay, + new ArrayList>(), + diskPolicies, + numberOfSegments + ); + } + + private TieredSpilloverCache initializeTieredSpilloverCache( int keyValueSize, int diskCacheSize, RemovalListener, String> removalListener, Settings settings, long diskDeliberateDelay, List> policies, + List> diskPolicies, int numberOfSegments ) { ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); @@ -2481,6 +2716,7 @@ private TieredSpilloverCache intializeTieredSpilloverCache( mockDiskCacheFactory, cacheConfig, policies, + diskPolicies, removalListener, numberOfSegments, onHeapCacheSizeInBytes, From bd6e2a7e99236645aaddae6ff4858265bc0e0a99 Mon Sep 17 00:00:00 2001 From: Rishabh Singh Date: Thu, 27 Feb 2025 14:58:21 -0800 Subject: [PATCH 037/550] Refresh benchmark configs to use 3.0.0-alpha1 version (#17476) Signed-off-by: Rishabh Singh --- .github/benchmark-configs.json | 121 +++---------------- .github/workflows/benchmark-pull-request.yml | 2 +- 2 files changed, 19 insertions(+), 104 deletions(-) diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json index 1c80f5048a611..17644c067ac98 100644 --- a/.github/benchmark-configs.json +++ b/.github/benchmark-configs.json @@ -2,7 +2,7 @@ "name": "Cluster and opensearch-benchmark configurations", "id_1": { "description": "Indexing only configuration for NYC_TAXIS workload", - "supported_major_versions": ["2", "3"], + "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", @@ -19,7 +19,7 @@ }, "id_2": { "description": "Indexing only configuration for HTTP_LOGS workload", - "supported_major_versions": ["2", "3"], + "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", @@ -41,7 +41,7 @@ "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "nyc_taxis", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"nyc_taxis_1_shard\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"nyc_taxis_1_shard\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -52,81 +52,13 @@ "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_4": { - "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-3.0.0", - 
"supported_major_versions": ["3"], - "cluster-benchmark-configs": { - "SINGLE_NODE_CLUSTER": "true", - "MIN_DISTRIBUTION": "true", - "TEST_WORKLOAD": "http_logs", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"http_logs_1_shard\"}", - "CAPTURE_NODE_STAT": "true", - "TEST_PROCEDURE": "restore-from-snapshot" - }, - "cluster_configuration": { - "size": "Single-Node", - "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - }, - "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" - }, - "id_5": { "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-3.0.0", "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", - "CAPTURE_NODE_STAT": "true", - "TEST_PROCEDURE": "restore-from-snapshot" - }, - "cluster_configuration": { - "size": "Single-Node", - "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - }, - "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" - }, - "id_6": { - "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-2.x", - "supported_major_versions": ["2"], - "cluster-benchmark-configs": { - "SINGLE_NODE_CLUSTER": "true", - "MIN_DISTRIBUTION": "true", - "TEST_WORKLOAD": "nyc_taxis", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"nyc_taxis_1_shard\"}", - "CAPTURE_NODE_STAT": "true", - "TEST_PROCEDURE": "restore-from-snapshot" - }, - "cluster_configuration": { - "size": "Single-Node", - "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - }, - "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" - }, - "id_7": { - "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-2.x", - "supported_major_versions": ["2"], - "cluster-benchmark-configs": { - "SINGLE_NODE_CLUSTER": "true", - "MIN_DISTRIBUTION": "true", - "TEST_WORKLOAD": "http_logs", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"http_logs_1_shard\"}", - "CAPTURE_NODE_STAT": "true", - "TEST_PROCEDURE": "restore-from-snapshot" - }, - "cluster_configuration": { - "size": "Single-Node", - "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - }, - "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" - }, - "id_8": { - "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-2.x", - "supported_major_versions": ["2"], - "cluster-benchmark-configs": { - "SINGLE_NODE_CLUSTER": "true", - "MIN_DISTRIBUTION": "true", - "TEST_WORKLOAD": "big5", - "WORKLOAD_PARAMS": 
"{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"big5_1_shard_ordered\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -136,9 +68,9 @@ }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, - "id_9": { + "id_5": { "description": "Indexing and search configuration for pmc workload", - "supported_major_versions": ["2", "3"], + "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", @@ -152,9 +84,9 @@ }, "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" }, - "id_10": { + "id_6": { "description": "Indexing only configuration for stack-overflow workload", - "supported_major_versions": ["2", "3"], + "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", @@ -168,7 +100,7 @@ }, "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" }, - "id_11": { + "id_7": { "description": "Search only test-procedure for big5 with concurrent segment search setting enabled", "supported_major_versions": ["3"], "cluster-benchmark-configs": { @@ -176,7 +108,7 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "search.concurrent_segment_search.enabled:true", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -186,7 +118,7 @@ }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, - "id_12": { + "id_8": { "description": "Search only test-procedure for big5 with concurrent segment search mode as all", "supported_major_versions": ["3"], "cluster-benchmark-configs": { @@ -194,7 +126,7 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "search.concurrent_segment_search.mode:all", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -204,7 +136,7 @@ }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, - "id_13": { + "id_9": { 
"description": "Search only test-procedure for big5 with concurrent segment search mode as auto", "supported_major_versions": ["3"], "cluster-benchmark-configs": { @@ -212,7 +144,7 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "search.concurrent_segment_search.mode:auto", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -222,7 +154,7 @@ }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, - "id_14": { + "id_10": { "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-3.0.0. Enables range query approximation.", "supported_major_versions": ["3"], "cluster-benchmark-configs": { @@ -230,23 +162,6 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "opensearch.experimental.feature.approximate_point_range_query.enabled:true", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", - "CAPTURE_NODE_STAT": "true", - "TEST_PROCEDURE": "restore-from-snapshot" - }, - "cluster_configuration": { - "size": "Single-Node", - "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - }, - "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" - }, - "id_15": { - "description": "Search only test-procedure for big5, uses lucene-10 index snapshot to restore the data for OS-3.0.0", - "supported_major_versions": ["3"], - "cluster-benchmark-configs": { - "SINGLE_NODE_CLUSTER": "true", - "MIN_DISTRIBUTION": "true", - "TEST_WORKLOAD": "big5", "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" @@ -257,9 +172,9 @@ }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, - "id_16": { + "id_11": { "description": "Benchmarking config for NESTED workload, benchmarks nested queries with inner-hits", - "supported_major_versions": ["2", "3"], + "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", @@ -273,4 +188,4 @@ }, "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" } - } +} \ No newline at end of file diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index e6ccc31160bf9..38e12f97d4480 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -22,7 +22,7 @@ jobs: echo "PR_NUMBER=${{ github.event.issue.number }}" >> $GITHUB_ENV echo "REPOSITORY=${{ github.event.repository.full_name }}" >> 
$GITHUB_ENV OPENSEARCH_VERSION=$(awk -F '=' '/^opensearch[[:space:]]*=/ {gsub(/[[:space:]]/, "", $2); print $2}' buildSrc/version.properties) - echo "OPENSEARCH_VERSION=$OPENSEARCH_VERSION" >> $GITHUB_ENV + echo "OPENSEARCH_VERSION=$OPENSEARCH_VERSION-alpha1" >> $GITHUB_ENV major_version=$(echo $OPENSEARCH_VERSION | cut -d'.' -f1) echo "OPENSEARCH_MAJOR_VERSION=$major_version" >> $GITHUB_ENV echo "USER_TAGS=pull_request_number:${{ github.event.issue.number }},repository:OpenSearch" >> $GITHUB_ENV From 0dde4da59e50835701f03b21022633aae959ef99 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 27 Feb 2025 18:55:15 -0500 Subject: [PATCH 038/550] Add 2.19.1 release notes (#17468) (#17477) Signed-off-by: Andriy Redko (cherry picked from commit 2e4741fb45d1b150aaeeadf66d41445b23ff5982) --- CHANGELOG.md | 2 -- release-notes/opensearch.release-notes-2.19.1.md | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 release-notes/opensearch.release-notes-2.19.1.md diff --git a/CHANGELOG.md b/CHANGELOG.md index bd218393919bd..45fd4813e72da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17229](https://github.com/opensearch-project/OpenSearch/pull/17229)) - Bump `org.jruby.joni:joni` from 2.2.1 to 2.2.3 ([#17136](https://github.com/opensearch-project/OpenSearch/pull/17136)) - Bump `org.apache.ant:ant` from 1.10.14 to 1.10.15 ([#17288](https://github.com/opensearch-project/OpenSearch/pull/17288)) -- Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) - Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) - Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) @@ -41,7 +40,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Add highlighting for wildcard search on `match_only_text` field ([#17101](https://github.com/opensearch-project/OpenSearch/pull/17101)) - Fix illegal argument exception when creating a PIT ([#16781](https://github.com/opensearch-project/OpenSearch/pull/16781)) -- Fix HTTP API calls that hang with 'Accept-Encoding: zstd' ([#17408](https://github.com/opensearch-project/OpenSearch/pull/17408)) ### Security diff --git a/release-notes/opensearch.release-notes-2.19.1.md b/release-notes/opensearch.release-notes-2.19.1.md new file mode 100644 index 0000000000000..81eccde2b1c30 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.19.1.md @@ -0,0 +1,16 @@ +## 2025-02-27 Version 2.19.1 Release Notes + +## [2.19.1] +### Added +- Add execution_hint to cardinality aggregator request (#[17420](https://github.com/opensearch-project/OpenSearch/pull/17420)) + +### Dependencies +- Bump netty from 4.1.117.Final to 4.1.118.Final ([#17320](https://github.com/opensearch-project/OpenSearch/pull/17320)) +- Bump `jetty` version from 9.4.55.v20240627 to 9.4.57.v20241219 + +### Changed + +### Deprecated + +### Fixed +- Fix HTTP API calls that hang with 'Accept-Encoding: zstd' 
([#17408](https://github.com/opensearch-project/OpenSearch/pull/17408)) From ceddbe07a3c1f2fe28c3548695528fd00497601b Mon Sep 17 00:00:00 2001 From: Rishabh Singh Date: Thu, 27 Feb 2025 15:58:54 -0800 Subject: [PATCH 039/550] add alpha1 qualifier to assemble command in benchmark workflow (#17481) Signed-off-by: Rishabh Singh --- .github/workflows/benchmark-pull-request.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index 38e12f97d4480..850a3310cbf6c 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -147,7 +147,7 @@ jobs: distribution: 'temurin' - name: Build and Assemble OpenSearch from PR run: | - ./gradlew :distribution:archives:linux-tar:assemble -Dbuild.snapshot=false + ./gradlew :distribution:archives:linux-tar:assemble -Dbuild.snapshot=false -Dbuild.version_qualifier=alpha1 - name: Configure AWS credentials uses: aws-actions/configure-aws-credentials@v4 with: From a961ec728859b5318a8c7f80206ff6566a954971 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 27 Feb 2025 19:13:38 -0800 Subject: [PATCH 040/550] Add @bugmakerrrrrr as maintainerrrrrr (#17466) Following the [nomination process][1], I have nominated and other maintainers have agreed to add Pan Guixin (@bugmakerrrrrr) as a co-Maintainer of the OpenSearch repository. Pan Guixin has kindly accepted the invitation. [1]: https://github.com/opensearch-project/.github/blob/main/RESPONSIBILITIES.md#becoming-a-maintainer Signed-off-by: Andrew Ross --- .github/CODEOWNERS | 2 +- MAINTAINERS.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 38ce0c3a3f927..5915365677ca2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,7 +11,7 @@ # 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file. # Default ownership for all repo files -* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +* @anasalkouz @andrross @ashking94 @bugmakerrrrrr @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /modules/lang-painless/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /modules/parent-join/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 887ff654dff96..7906596f047d5 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -25,6 +25,7 @@ This document contains a list of maintainers in this repo. 
See [opensearch-proje | Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | | Nick Knize | [nknize](https://github.com/nknize) | Lucenia | | Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | +| Pan Guixin | [bugmakerrrrrr](https://github.com/bugmakerrrrrr) | ByteDance | | Peter Nied | [peternied](https://github.com/peternied) | Amazon | | Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon | | Sachin Kale | [sachinpkale](https://github.com/sachinpkale) | Amazon | From 968eafbd37ef0b864e887643c74291cc3e5ca0d0 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Fri, 28 Feb 2025 14:34:55 -0500 Subject: [PATCH 041/550] Update version to 2_19_1 for serialization of execution hint in CardinalityAggregationBuilder (#17492) Signed-off-by: Craig Perkins --- .../aggregations/metrics/CardinalityAggregationBuilder.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index f77bbfbd48461..202a6babafec7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -116,7 +116,7 @@ public CardinalityAggregationBuilder(StreamInput in) throws IOException { if (in.readBoolean()) { precisionThreshold = in.readLong(); } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_19_1)) { executionHint = in.readOptionalString(); } } @@ -133,7 +133,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { if (hasPrecisionThreshold) { out.writeLong(precisionThreshold); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_19_1)) { out.writeOptionalString(executionHint); } } From bfdf019ab3c9def800b015b95c1bd5aa3a5232a1 Mon Sep 17 00:00:00 2001 From: Wenqi Gao Date: Mon, 3 Mar 2025 12:13:04 -0800 Subject: [PATCH 042/550] Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder. (#17409) (#17409) * The filter function will combine a filter with the query builder. If the query builder itself has a filter we will combine the filter and return the query builder itself. If no we will use a bool query builder to combine the query builder and the filter and then return the bool query builder. 
Signed-off-by: Chloe Gao --- CHANGELOG-3.0.md | 1 + .../index/query/AbstractQueryBuilder.java | 24 +++++++++++++++++++ .../index/query/BoolQueryBuilder.java | 12 ++++++---- .../query/ConstantScoreQueryBuilder.java | 16 +++++++++++++ .../opensearch/index/query/QueryBuilder.java | 12 ++++++++++ .../index/query/SpanNearQueryBuilder.java | 5 ++++ .../index/query/BoolQueryBuilderTests.java | 18 +++++++++++++- .../query/ConstantScoreQueryBuilderTests.java | 17 +++++++++++++ .../query/SpanMultiTermQueryBuilderTests.java | 5 ++++ .../test/AbstractQueryTestCase.java | 19 +++++++++++++++ 10 files changed, 123 insertions(+), 6 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 4c366d0c7714f..1a0f9280136c4 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) - Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) - Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) +- Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java index 66c6ee115c3f0..cd133798faa6d 100644 --- a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java @@ -86,6 +86,30 @@ protected AbstractQueryBuilder(StreamInput in) throws IOException { queryName = in.readOptionalString(); } + /** + * Check the input parameters of filter function. + * @param filter filter to combine with current query builder + * @return true if parameters are valid. Returns false when the filter is null. + */ + public static boolean validateFilterParams(QueryBuilder filter) { + return filter != null; + } + + /** + * Combine filter with current query builder + * @param filter filter to combine with current query builder + * @return query builder with filter combined + */ + public QueryBuilder filter(QueryBuilder filter) { + if (validateFilterParams(filter) == false) { + return this; + } + final BoolQueryBuilder modifiedQB = new BoolQueryBuilder(); + modifiedQB.must(this); + modifiedQB.filter(filter); + return modifiedQB; + } + @Override public final void writeTo(StreamOutput out) throws IOException { out.writeFloat(boost); diff --git a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java index c44a7ef6a397c..58009f055650b 100644 --- a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java @@ -135,13 +135,15 @@ public List must() { /** * Adds a query that must appear in the matching documents but will - * not contribute to scoring. No {@code null} value allowed. + * not contribute to scoring. If null value passed, then do nothing and return. 
+ * @param filter the filter to add to the current ConstantScoreQuery + * @return query builder with filter combined */ - public BoolQueryBuilder filter(QueryBuilder queryBuilder) { - if (queryBuilder == null) { - throw new IllegalArgumentException("inner bool query clause cannot be null"); + public BoolQueryBuilder filter(QueryBuilder filter) { + if (validateFilterParams(filter) == false) { + return this; } - filterClauses.add(queryBuilder); + filterClauses.add(filter); return this; } diff --git a/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java index b2764d29da80a..b74224cd5ef22 100644 --- a/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java @@ -101,6 +101,22 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.endObject(); } + /** + * Adds a filter to the current ConstantScoreQuery. + * @param filter the filter to add to the current ConstantScoreQuery + * @return query builder with filter combined + */ + public ConstantScoreQueryBuilder filter(QueryBuilder filter) { + if (validateFilterParams(filter) == false) { + return this; + } + QueryBuilder filteredFilterBuilder = filterBuilder.filter(filter); + if (filteredFilterBuilder != filterBuilder) { + return new ConstantScoreQueryBuilder(filteredFilterBuilder); + } + return this; + } + public static ConstantScoreQueryBuilder fromXContent(XContentParser parser) throws IOException { QueryBuilder query = null; boolean queryFound = false; diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java index 0cdf7f31c2ebf..f52b393202d28 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java @@ -47,6 +47,18 @@ @PublicApi(since = "1.0.0") public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewriteable { + /** + * This function combines a filter with a query builder. If the query builder itself has + * a filter we will combine the filter and return the query builder itself. + * If not we will use a bool query builder to combine the query builder and + * the filter and then return the bool query builder. + * If the filter is null we simply return the query builder without any operation. + * + * @param filter The null filter to be added to the existing filter. + * @return A QueryBuilder with the filter added to the existing filter. + */ + QueryBuilder filter(QueryBuilder filter); + /** * Converts this QueryBuilder to a lucene {@link Query}. 
* Returns {@code null} if this query should be ignored in the context of diff --git a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java index 179673f500a92..2912a5cb09276 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java @@ -375,6 +375,11 @@ public Query toQuery(QueryShardContext context) throws IOException { throw new UnsupportedOperationException(); } + @Override + public QueryBuilder filter(QueryBuilder filter) { + throw new UnsupportedOperationException("You can't add a filter to a SpanGapQueryBuilder"); + } + @Override public String queryName() { throw new UnsupportedOperationException(); diff --git a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java index a23dff39c6496..f3de666c52932 100644 --- a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java @@ -178,7 +178,6 @@ public void testIllegalArguments() { BoolQueryBuilder booleanQuery = new BoolQueryBuilder(); expectThrows(IllegalArgumentException.class, () -> booleanQuery.must(null)); expectThrows(IllegalArgumentException.class, () -> booleanQuery.mustNot(null)); - expectThrows(IllegalArgumentException.class, () -> booleanQuery.filter(null)); expectThrows(IllegalArgumentException.class, () -> booleanQuery.should(null)); } @@ -326,6 +325,23 @@ public void testFilterNull() throws IOException { assertTrue(builder.filter().isEmpty()); } + /** + * Check if a filter can be applied to the BoolQuery + * @throws IOException + */ + public void testFilter() throws IOException { + // Test for non null filter + String query = "{\"bool\" : {\"filter\" : null } }"; + QueryBuilder filter = QueryBuilders.matchAllQuery(); + BoolQueryBuilder builder = (BoolQueryBuilder) parseQuery(query); + assertFalse(builder.filter(filter).filter().isEmpty()); + assertEquals(builder.filter(filter).filter().get(0), filter); + + // Test for null filter case + builder = (BoolQueryBuilder) parseQuery(query); + assertTrue(builder.filter(null).filter().isEmpty()); + } + /** * test that unknown query names in the clauses throw an error */ diff --git a/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java index 527413d2513d0..cdc61a7f66e9c 100644 --- a/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java @@ -143,4 +143,21 @@ public void testVisit() { assertEquals(2, visitorQueries.size()); } + + public void testFilter() { + // Test for non null filter + BoolQueryBuilder filterBuilder = new BoolQueryBuilder(); + ConstantScoreQueryBuilder constantScoreQueryBuilder = new ConstantScoreQueryBuilder(filterBuilder); + QueryBuilder filter = QueryBuilders.matchAllQuery(); + constantScoreQueryBuilder.filter(filter); + assertEquals(1, filterBuilder.filter().size()); + assertEquals(filter, filterBuilder.filter().get(0)); + + // Test for null filter + filterBuilder = new BoolQueryBuilder(); + constantScoreQueryBuilder = new ConstantScoreQueryBuilder(filterBuilder); + constantScoreQueryBuilder.filter(null); + assertEquals(0, 
filterBuilder.filter().size()); + + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java index fe8ab7c0765e6..48cd5c0f2f918 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -182,6 +182,11 @@ public void writeTo(StreamOutput out) throws IOException { public String fieldName() { return "foo"; } + + @Override + public QueryBuilder filter(QueryBuilder filter) { + return this; + } } @Override diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java index afd93e1b72fbb..bffde62b193da 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java @@ -63,7 +63,9 @@ import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.AbstractQueryBuilder; +import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.Rewriteable; @@ -868,4 +870,21 @@ public void testCacheability() throws IOException { assertTrue("query should be cacheable: " + queryBuilder.toString(), context.isCacheable()); } + /** + * Check if a filter can be applied to the abstract query builder. + * @throws UnsupportedOperationException + */ + public void testFilter() throws IOException { + QB queryBuilder = createTestQueryBuilder(); + QueryBuilder filter = QueryBuilders.matchAllQuery(); + // Test for Null Filter case + QueryBuilder returnedQuerybuilder = queryBuilder.filter(null); + assertEquals(queryBuilder, returnedQuerybuilder); + + // Test for non null filter + returnedQuerybuilder = queryBuilder.filter(filter); + assertTrue(returnedQuerybuilder instanceof BoolQueryBuilder); + assertTrue(((BoolQueryBuilder) returnedQuerybuilder).filter().size() == 1); + assertEquals(filter, ((BoolQueryBuilder) returnedQuerybuilder).filter().get(0)); + } } From 21f69cae7667d3666a2d09fb6936e1b04b44b015 Mon Sep 17 00:00:00 2001 From: Xu Xiong Date: Mon, 3 Mar 2025 15:48:14 -0800 Subject: [PATCH 043/550] [Pull-based Ingestion] Add basic NodeStats metrics (#17444) Signed-off-by: xuxiong1 --- .../plugin/kafka/IngestFromKafkaIT.java | 6 + .../stats/TransportClusterStatsAction.java | 7 +- .../admin/indices/stats/ShardStats.java | 23 ++- .../stats/TransportIndicesStatsAction.java | 14 +- .../org/opensearch/index/engine/Engine.java | 8 + .../index/engine/IngestionEngine.java | 6 + .../opensearch/index/shard/IndexShard.java | 5 + .../opensearch/indices/IndicesService.java | 7 +- .../pollingingest/DefaultStreamPoller.java | 12 ++ .../MessageProcessorRunnable.java | 7 + .../pollingingest/PollingIngestStats.java | 175 ++++++++++++++++++ .../indices/pollingingest/StreamPoller.java | 2 + .../cluster/node/stats/NodeStatsTests.java | 2 + .../cluster/stats/ClusterStatsNodesTests.java | 1 + .../stats/ClusterStatsResponseTests.java | 1 + .../TransportRolloverActionTests.java | 2 +- .../shards/CatShardsResponseTests.java | 2 +- .../stats/IndicesStatsResponseTests.java | 2 +- 
.../opensearch/cluster/DiskUsageTests.java | 4 +- .../index/shard/IndexShardTests.java | 6 +- .../PollingIngestStatsTests.java | 58 ++++++ .../action/cat/RestShardsActionTests.java | 1 + 22 files changed, 340 insertions(+), 11 deletions(-) create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/PollingIngestStats.java create mode 100644 server/src/test/java/org/opensearch/indices/pollingingest/PollingIngestStatsTests.java diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java index d51569431506a..6fe670d4d5b62 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java @@ -16,6 +16,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.plugins.PluginInfo; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Assert; @@ -75,6 +76,11 @@ public void testKafkaIngestion() { refresh("test"); SearchResponse response = client().prepareSearch("test").setQuery(query).get(); assertThat(response.getHits().getTotalHits().value(), is(1L)); + PollingIngestStats stats = client().admin().indices().prepareStats("test").get().getIndex("test").getShards()[0] + .getPollingIngestStats(); + assertNotNull(stats); + assertThat(stats.getMessageProcessorStats().getTotalProcessedCount(), is(2L)); + assertThat(stats.getConsumerStats().getTotalPolledCount(), is(2L)); }); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index c6581b99eb559..6ea6fe5ea9715 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -56,6 +56,7 @@ import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequest; @@ -210,15 +211,18 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq CommitStats commitStats; SeqNoStats seqNoStats; RetentionLeaseStats retentionLeaseStats; + PollingIngestStats pollingIngestStats; try { commitStats = indexShard.commitStats(); seqNoStats = indexShard.seqNoStats(); retentionLeaseStats = indexShard.getRetentionLeaseStats(); + pollingIngestStats = indexShard.pollingIngestStats(); } catch (final AlreadyClosedException e) { // shard is closed - no stats is fine commitStats = null; seqNoStats = null; retentionLeaseStats = null; + pollingIngestStats = null; } shardsStats.add( new ShardStats( @@ -227,7 +231,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq new CommonStats(indicesService.getIndicesQueryCache(), indexShard, commonStatsFlags), commitStats, seqNoStats, - retentionLeaseStats + retentionLeaseStats, + 
pollingIngestStats ) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java index 4ed1ce95b7de2..7c78a903217ab 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.stats; +import org.opensearch.Version; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; @@ -44,6 +45,7 @@ import org.opensearch.index.seqno.RetentionLeaseStats; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.shard.ShardPath; +import org.opensearch.indices.pollingingest.PollingIngestStats; import java.io.IOException; @@ -65,6 +67,9 @@ public class ShardStats implements Writeable, ToXContentFragment { @Nullable private RetentionLeaseStats retentionLeaseStats; + @Nullable + private PollingIngestStats pollingIngestStats; + /** * Gets the current retention lease stats. * @@ -87,6 +92,9 @@ public ShardStats(StreamInput in) throws IOException { isCustomDataPath = in.readBoolean(); seqNoStats = in.readOptionalWriteable(SeqNoStats::new); retentionLeaseStats = in.readOptionalWriteable(RetentionLeaseStats::new); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + pollingIngestStats = in.readOptionalWriteable(PollingIngestStats::new); + } } public ShardStats( @@ -95,7 +103,8 @@ public ShardStats( final CommonStats commonStats, final CommitStats commitStats, final SeqNoStats seqNoStats, - final RetentionLeaseStats retentionLeaseStats + final RetentionLeaseStats retentionLeaseStats, + final PollingIngestStats pollingIngestStats ) { this.shardRouting = routing; this.dataPath = shardPath.getRootDataPath().toString(); @@ -105,6 +114,7 @@ public ShardStats( this.commonStats = commonStats; this.seqNoStats = seqNoStats; this.retentionLeaseStats = retentionLeaseStats; + this.pollingIngestStats = pollingIngestStats; } /** @@ -128,6 +138,11 @@ public SeqNoStats getSeqNoStats() { return this.seqNoStats; } + @Nullable + public PollingIngestStats getPollingIngestStats() { + return this.pollingIngestStats; + } + public String getDataPath() { return dataPath; } @@ -150,6 +165,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isCustomDataPath); out.writeOptionalWriteable(seqNoStats); out.writeOptionalWriteable(retentionLeaseStats); + if (out.getVersion().onOrAfter((Version.V_3_0_0))) { + out.writeOptionalWriteable(pollingIngestStats); + } } @Override @@ -171,6 +189,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (retentionLeaseStats != null) { retentionLeaseStats.toXContent(builder, params); } + if (pollingIngestStats != null) { + pollingIngestStats.toXContent(builder, params); + } builder.startObject(Fields.SHARD_PATH); builder.field(Fields.STATE_PATH, statePath); builder.field(Fields.DATA_PATH, dataPath); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 2b85b6d5d6b5b..baa1dfa2431e6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -52,6 
+52,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -141,16 +142,27 @@ protected ShardStats shardOperation(IndicesStatsRequest request, ShardRouting sh CommitStats commitStats; SeqNoStats seqNoStats; RetentionLeaseStats retentionLeaseStats; + PollingIngestStats pollingIngestStats; try { commitStats = indexShard.commitStats(); seqNoStats = indexShard.seqNoStats(); retentionLeaseStats = indexShard.getRetentionLeaseStats(); + pollingIngestStats = indexShard.pollingIngestStats(); } catch (final AlreadyClosedException e) { // shard is closed - no stats is fine commitStats = null; seqNoStats = null; retentionLeaseStats = null; + pollingIngestStats = null; } - return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), commonStats, commitStats, seqNoStats, retentionLeaseStats); + return new ShardStats( + indexShard.routingEntry(), + indexShard.shardPath(), + commonStats, + commitStats, + seqNoStats, + retentionLeaseStats, + pollingIngestStats + ); } } diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index db08ea1164f68..92858ffc26902 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -93,6 +93,7 @@ import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogDeletionPolicy; import org.opensearch.index.translog.TranslogManager; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.search.suggest.completion.CompletionStats; import java.io.Closeable; @@ -946,6 +947,13 @@ public SegmentsStats segmentsStats(boolean includeSegmentFileSizes, boolean incl return stats; } + /** + * @return Stats for pull-based ingestion. 
+ */ + public PollingIngestStats pollingIngestStats() { + return null; + } + protected TranslogDeletionPolicy getTranslogDeletionPolicy(EngineConfig engineConfig) { TranslogDeletionPolicy customTranslogDeletionPolicy = null; if (engineConfig.getCustomTranslogDeletionPolicyFactory() != null) { diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index 72b59ba88b4c2..00feab082c178 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -29,6 +29,7 @@ import org.opensearch.index.translog.TranslogStats; import org.opensearch.index.translog.listener.CompositeTranslogEventListener; import org.opensearch.indices.pollingingest.DefaultStreamPoller; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.indices.pollingingest.StreamPoller; import java.io.IOException; @@ -288,4 +289,9 @@ protected TranslogManager createTranslogManager( protected Map commitDataAsMap() { return commitDataAsMap(indexWriter); } + + @Override + public PollingIngestStats pollingIngestStats() { + return streamPoller.getStats(); + } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f8ad3fc8cf866..bd47a664b729d 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -184,6 +184,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.cluster.IndicesClusterStateService; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; import org.opensearch.indices.recovery.RecoveryListener; @@ -1533,6 +1534,10 @@ public CompletionStats completionStats(String... fields) { return getEngine().completionStats(fields); } + public PollingIngestStats pollingIngestStats() { + return getEngine().pollingIngestStats(); + } + /** * Executes the given flush request against the engine. 
* diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 527c2c23ba6b1..f3b0121dd5c88 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -149,6 +149,7 @@ import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.pollingingest.IngestionEngineFactory; +import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoverySettings; @@ -758,15 +759,18 @@ IndexShardStats indexShardStats(final IndicesService indicesService, final Index CommitStats commitStats; SeqNoStats seqNoStats; RetentionLeaseStats retentionLeaseStats; + PollingIngestStats pollingIngestStats; try { commitStats = indexShard.commitStats(); seqNoStats = indexShard.seqNoStats(); retentionLeaseStats = indexShard.getRetentionLeaseStats(); + pollingIngestStats = indexShard.pollingIngestStats(); } catch (AlreadyClosedException e) { // shard is closed - no stats is fine commitStats = null; seqNoStats = null; retentionLeaseStats = null; + pollingIngestStats = null; } return new IndexShardStats( @@ -778,7 +782,8 @@ IndexShardStats indexShardStats(final IndicesService indicesService, final Index new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), commitStats, seqNoStats, - retentionLeaseStats + retentionLeaseStats, + pollingIngestStats ) } ); } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java index 884cffec4aad5..3dfd77f75c82d 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.Nullable; +import org.opensearch.common.metrics.CounterMetric; import org.opensearch.index.IngestionShardConsumer; import org.opensearch.index.IngestionShardPointer; import org.opensearch.index.Message; @@ -60,6 +61,8 @@ public class DefaultStreamPoller implements StreamPoller { private MessageProcessorRunnable processorRunnable; + private final CounterMetric totalPolledCount = new CounterMetric(); + // A pointer to the max persisted pointer for optimizing the check @Nullable private IngestionShardPointer maxPersistedPointer; @@ -204,6 +207,7 @@ protected void startPoll() { logger.info("Skipping message with pointer {} as it is already processed", result.getPointer().asString()); continue; } + totalPolledCount.inc(); blockingQueue.put(result); logger.debug( "Put message {} with pointer {} to the blocking queue", @@ -297,6 +301,14 @@ public IngestionShardPointer getBatchStartPointer() { return batchStartPointer; } + @Override + public PollingIngestStats getStats() { + PollingIngestStats.Builder builder = new PollingIngestStats.Builder(); + builder.setTotalPolledCount(totalPolledCount.count()); + builder.setTotalProcessedCount(processorRunnable.getStats().count()); + return builder.build(); + } + public State getState() { return state; } diff --git 
a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java index 53f9353477869..0c06ebc558466 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java @@ -14,6 +14,7 @@ import org.apache.lucene.index.Term; import org.opensearch.action.DocWriteRequest; import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesArray; @@ -48,6 +49,7 @@ public class MessageProcessorRunnable implements Runnable { private final BlockingQueue> blockingQueue; private final MessageProcessor messageProcessor; + private final CounterMetric stats = new CounterMetric(); private static final String ID = "_id"; private static final String OP_TYPE = "_op_type"; @@ -229,8 +231,13 @@ public void run() { Thread.currentThread().interrupt(); // Restore interrupt status } if (result != null) { + stats.inc(); messageProcessor.process(result.getMessage(), result.getPointer()); } } } + + public CounterMetric getStats() { + return stats; + } } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/PollingIngestStats.java b/server/src/main/java/org/opensearch/indices/pollingingest/PollingIngestStats.java new file mode 100644 index 0000000000000..cda706b29083a --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/PollingIngestStats.java @@ -0,0 +1,175 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.pollingingest; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Stats for pull-based ingestion + */ +@ExperimentalApi +public class PollingIngestStats implements Writeable, ToXContentFragment { + private final MessageProcessorStats messageProcessorStats; + private final ConsumerStats consumerStats; + // TODO: add error stats from error handling sink + + public PollingIngestStats(MessageProcessorStats messageProcessorStats, ConsumerStats consumerStats) { + this.messageProcessorStats = messageProcessorStats; + this.consumerStats = consumerStats; + } + + public PollingIngestStats(StreamInput in) throws IOException { + long totalProcessedCount = in.readLong(); + this.messageProcessorStats = new MessageProcessorStats(totalProcessedCount); + long totalPolledCount = in.readLong(); + this.consumerStats = new ConsumerStats(totalPolledCount); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(messageProcessorStats.getTotalProcessedCount()); + out.writeLong(consumerStats.getTotalPolledCount()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("polling_ingest_stats"); + builder.startObject("message_processor_stats"); + builder.field("total_processed_count", messageProcessorStats.getTotalProcessedCount()); + builder.endObject(); + builder.startObject("consumer_stats"); + builder.field("total_polled_count", consumerStats.getTotalPolledCount()); + builder.endObject(); + builder.endObject(); + return builder; + } + + public MessageProcessorStats getMessageProcessorStats() { + return messageProcessorStats; + } + + public ConsumerStats getConsumerStats() { + return consumerStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof PollingIngestStats)) return false; + PollingIngestStats that = (PollingIngestStats) o; + return Objects.equals(messageProcessorStats, that.messageProcessorStats) && Objects.equals(consumerStats, that.consumerStats); + } + + @Override + public int hashCode() { + return Objects.hash(messageProcessorStats, consumerStats); + } + + /** + * Stats for message processor + */ + @ExperimentalApi + public static class MessageProcessorStats { + private final long totalProcessedCount; + + public MessageProcessorStats(long totalProcessedCount) { + this.totalProcessedCount = totalProcessedCount; + } + + public long getTotalProcessedCount() { + return totalProcessedCount; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MessageProcessorStats)) return false; + MessageProcessorStats that = (MessageProcessorStats) o; + return totalProcessedCount == that.totalProcessedCount; + } + + @Override + public int hashCode() { + return Objects.hash(totalProcessedCount); + } + } + + /** + * Stats for consumer (poller) + */ + @ExperimentalApi + public static class ConsumerStats { + private final long totalPolledCount; + + public ConsumerStats(long totalPolledCount) { + this.totalPolledCount = totalPolledCount; + } + + public long getTotalPolledCount() { + return totalPolledCount; 
+ } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof ConsumerStats)) return false; + ConsumerStats that = (ConsumerStats) o; + return totalPolledCount == that.totalPolledCount; + } + + @Override + public int hashCode() { + return Objects.hash(totalPolledCount); + } + } + + /** + * Builder for {@link PollingIngestStats} + */ + @ExperimentalApi + public static class Builder { + private long totalProcessedCount; + private long totalPolledCount; + + public Builder() {} + + public Builder setTotalProcessedCount(long totalProcessedCount) { + this.totalProcessedCount = totalProcessedCount; + return this; + } + + public Builder setTotalPolledCount(long totalPolledCount) { + this.totalPolledCount = totalPolledCount; + return this; + } + + public PollingIngestStats build() { + MessageProcessorStats messageProcessorStats = new MessageProcessorStats(totalProcessedCount); + ConsumerStats consumerStats = new ConsumerStats(totalPolledCount); + return new PollingIngestStats(messageProcessorStats, consumerStats); + } + } + + /** + * Returns a new builder for creating a {@link PollingIngestStats} instance. + * + * @return a new {@code Builder} instance + */ + public static Builder builder() { + return new Builder(); + } +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java index 5010982991ecc..15e1745433df2 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java @@ -50,6 +50,8 @@ public interface StreamPoller extends Closeable { */ IngestionShardPointer getBatchStartPointer(); + PollingIngestStats getStats(); + /** * a state to indicate the current state of the poller */ diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 34065daff2b8a..cccca0448a2cc 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -1413,6 +1413,7 @@ private HashMap> createRandomShardByStats(List shardStatsList = new ArrayList<>(); @@ -1464,6 +1465,7 @@ public MockNodeIndicesStats generateMockNodeIndicesStats( commonStats, null, null, + null, null ); IndexShardStats indexShardStats = new IndexShardStats(shardRouting.shardId(), new ShardStats[] { shardStats }); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java index 823661ba14abf..58d789b704a38 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -402,6 +402,7 @@ private ShardStats[] createshardStats(DiscoveryNode localNode, Index index, Comm commonStats, null, null, + null, null ); shardStatsList.add(shardStats); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java index ad7706292d93c..193c9cc471f7b 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java @@ -270,6 +270,7 @@ private ShardStats[] createShardStats(DiscoveryNode localNode, Index index, Comm commonStats, null, null, + null, null ); shardStatsList.add(shardStats); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 724c919f65375..6cef1049b3b50 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -422,7 +422,7 @@ public static IndicesStatsResponse randomIndicesStatsResponse(final IndexMetadat stats.get = new GetStats(); stats.flush = new FlushStats(); stats.warmer = new WarmerStats(); - shardStats.add(new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, null, null)); + shardStats.add(new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, null, null, null)); } } return IndicesStatsTests.newIndicesStatsResponse( diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shards/CatShardsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shards/CatShardsResponseTests.java index 11b1d5567d9fb..00d4a311dca1d 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shards/CatShardsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shards/CatShardsResponseTests.java @@ -152,7 +152,7 @@ private IndicesStatsResponse getIndicesStatsResponse() { Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(shardId)); ShardPath shardPath = new ShardPath(false, path, path, shId); ShardRouting routing = createShardRouting(shId, (shardId == 0)); - shards.add(new ShardStats(routing, shardPath, new CommonStats(), null, null, null)); + shards.add(new ShardStats(routing, shardPath, new CommonStats(), null, null, null, null)); } } return new IndicesStatsResponse(shards.toArray(new ShardStats[0]), 0, 0, 0, null); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java index 2b79e523fc620..421646f0812fe 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java @@ -88,7 +88,7 @@ public void testGetIndices() { Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(shardId)); ShardPath shardPath = new ShardPath(false, path, path, shId); ShardRouting routing = createShardRouting(index, shId, (shardId == 0)); - shards.add(new ShardStats(routing, shardPath, null, null, null, null)); + shards.add(new ShardStats(routing, shardPath, null, null, null, null, null)); AtomicLong primaryShardsCounter = expectedIndexToPrimaryShardsCount.computeIfAbsent( index.getName(), k -> new AtomicLong(0L) diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index cd050fb346563..d790d95757b02 100644 --- 
a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -135,8 +135,8 @@ public void testFillShardLevelInfo() { CommonStats commonStats1 = new CommonStats(); commonStats1.store = new StoreStats(1000, 0L); ShardStats[] stats = new ShardStats[] { - new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, test_0.shardId()), commonStats0, null, null, null), - new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, test_1.shardId()), commonStats1, null, null, null) }; + new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, test_0.shardId()), commonStats0, null, null, null, null), + new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, test_1.shardId()), commonStats1, null, null, null, null) }; final Map shardSizes = new HashMap<>(); final Map routingToPath = new HashMap<>(); InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath, new HashMap<>()); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 7614a54da52bf..9fc779891b810 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -1696,7 +1696,8 @@ public void testShardStats() throws IOException { new CommonStats(new IndicesQueryCache(Settings.EMPTY), shard, new CommonStatsFlags()), shard.commitStats(), shard.seqNoStats(), - shard.getRetentionLeaseStats() + shard.getRetentionLeaseStats(), + shard.pollingIngestStats() ); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath()); @@ -1838,7 +1839,8 @@ public void testShardStatsWithRemoteStoreEnabled() throws IOException { new CommonStats(new IndicesQueryCache(Settings.EMPTY), shard, new CommonStatsFlags()), shard.commitStats(), shard.seqNoStats(), - shard.getRetentionLeaseStats() + shard.getRetentionLeaseStats(), + shard.pollingIngestStats() ); RemoteSegmentStats remoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats(); assertRemoteSegmentStats(remoteSegmentTransferTracker, remoteSegmentStats); diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/PollingIngestStatsTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/PollingIngestStatsTests.java new file mode 100644 index 0000000000000..d64f350239013 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/pollingingest/PollingIngestStatsTests.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.pollingingest; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class PollingIngestStatsTests extends OpenSearchTestCase { + + public void testToXContent() throws IOException { + PollingIngestStats stats = createTestInstance(); + + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + builder.startObject(); + stats.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + String expected = "{\"polling_ingest_stats\":{\"message_processor_stats\":{\"total_processed_count\":" + + stats.getMessageProcessorStats().getTotalProcessedCount() + + "},\"consumer_stats\":{\"total_polled_count\":" + + stats.getConsumerStats().getTotalPolledCount() + + "}}}"; + + assertEquals(expected, builder.toString()); + } + + public void testSerialization() throws IOException { + PollingIngestStats original = createTestInstance(); + + try (BytesStreamOutput output = new BytesStreamOutput()) { + original.writeTo(output); + + try (StreamInput input = output.bytes().streamInput()) { + PollingIngestStats deserialized = new PollingIngestStats(input); + assertEquals(original, deserialized); + } + } + } + + private PollingIngestStats createTestInstance() { + return PollingIngestStats.builder() + .setTotalProcessedCount(randomNonNegativeLong()) + .setTotalPolledCount(randomNonNegativeLong()) + .build(); + } +} diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index c412167a10c75..53a5cec1332fb 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -93,6 +93,7 @@ public void setup() { commonStats, null, null, + null, null ); shardStatsMap.put(shardRouting, shardStats); From 218f353e821f5f641443bc1ffa1dc8ea73818709 Mon Sep 17 00:00:00 2001 From: Sandesh Kumar Date: Tue, 4 Mar 2025 08:28:13 +0530 Subject: [PATCH 044/550] [Star Tree] [Search] Keyword & Numeric Terms Aggregation (#17165) --------- Signed-off-by: Sandesh Kumar --- CHANGELOG-3.0.md | 1 + .../bucket/BucketsAggregator.java | 6 + .../histogram/DateHistogramAggregator.java | 50 +-- .../GlobalOrdinalsStringTermsAggregator.java | 81 ++++- .../bucket/terms/NumericTermsAggregator.java | 82 ++++- .../bucket/terms/TermsAggregator.java | 7 +- .../search/startree/StarTreeQueryContext.java | 33 ++ .../search/startree/StarTreeQueryHelper.java | 33 ++ .../search/SearchServiceStarTreeTests.java | 150 ++++++++ .../startree/KeywordTermsAggregatorTests.java | 245 +++++++++++++ .../startree/NumericTermsAggregatorTests.java | 342 ++++++++++++++++++ .../aggregations/AggregatorTestCase.java | 3 + 12 files changed, 986 insertions(+), 47 deletions(-) create mode 100644 server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 1a0f9280136c4..7211368c65ffb 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -22,6 +22,7 @@ The format is 
based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) - Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) - Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) +- [Star Tree] [Search] Resolving keyword & numeric bucket aggregation with metric aggregation using star-tree ([#17165](https://github.com/opensearch-project/OpenSearch/pull/17165)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java index f075d67b0f48d..a65728b2d658a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java @@ -136,6 +136,12 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do */ public final void collectStarTreeBucket(StarTreeBucketCollector collector, long docCount, long bucketOrd, int entryBit) throws IOException { + if (bucketOrd < 0) { + bucketOrd = -1 - bucketOrd; + } else { + grow(bucketOrd + 1); + } + if (docCounts.increment(bucketOrd, docCount) == docCount) { multiBucketConsumer.accept(0); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 2294ba6f9a2b5..d825b33a0f150 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -33,18 +33,14 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; -import org.apache.lucene.util.FixedBitSet; import org.opensearch.common.Nullable; import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.DateDimension; -import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitRounding; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; @@ -192,9 +188,9 @@ public ScoreMode scoreMode() { protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { - if 
(preComputeWithStarTree(ctx, supportedStarTree) == true) { - return true; - } + StarTreeBucketCollector starTreeBucketCollector = getStarTreeBucketCollector(ctx, supportedStarTree, null); + StarTreeQueryHelper.preComputeBucketsWithStarTree(starTreeBucketCollector); + return true; } return filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, segmentMatchAll(context, ctx)); } @@ -268,6 +264,10 @@ public StarTreeBucketCollector getStarTreeBucketCollector( ) throws IOException { assert parentCollector == null; StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); + SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getDimensionValuesIterator(starTreeDateDimension); + SortedNumericStarTreeValuesIterator docCountsIterator = StarTreeQueryHelper.getDocCountsIterator(starTreeValues, starTree); + return new StarTreeBucketCollector( starTreeValues, StarTreeTraversalUtil.getStarTreeResult( @@ -287,17 +287,6 @@ public void setSubCollectors() throws IOException { } } - SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues - .getDimensionValuesIterator(starTreeDateDimension); - - String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( - starTree.getField(), - "_doc_count", - MetricStat.DOC_COUNT.getTypeName() - ); - SortedNumericStarTreeValuesIterator docCountsIterator = (SortedNumericStarTreeValuesIterator) starTreeValues - .getMetricValuesIterator(metricName); - @Override public void collectStarTreeEntry(int starTreeEntry, long owningBucketOrd) throws IOException { if (!valuesIterator.advanceExact(starTreeEntry)) { @@ -311,15 +300,8 @@ public void collectStarTreeEntry(int starTreeEntry, long owningBucketOrd) throws if (docCountsIterator.advanceExact(starTreeEntry)) { long metricValue = docCountsIterator.nextValue(); - long bucketOrd = bucketOrds.add(owningBucketOrd, dimensionValue); - if (bucketOrd < 0) { - bucketOrd = -1 - bucketOrd; - collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); - } else { - grow(bucketOrd + 1); - collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); - } + collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); } } } @@ -393,20 +375,4 @@ public double bucketSize(long bucket, Rounding.DateTimeUnit unitSize) { return 1.0; } } - - private boolean preComputeWithStarTree(LeafReaderContext ctx, CompositeIndexFieldInfo starTree) throws IOException { - StarTreeBucketCollector starTreeBucketCollector = getStarTreeBucketCollector(ctx, starTree, null); - FixedBitSet matchingDocsBitSet = starTreeBucketCollector.getMatchingDocsBitSet(); - - int numBits = matchingDocsBitSet.length(); - - if (numBits > 0) { - for (int bit = matchingDocsBitSet.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits) - ? 
matchingDocsBitSet.nextSetBit(bit + 1) - : DocIdSetIterator.NO_MORE_DOCS) { - starTreeBucketCollector.collectStarTreeEntry(bit, 0); - } - } - return true; - } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index ef925b7f6416a..d8ec9feaf44b4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -51,6 +51,10 @@ import org.opensearch.common.util.LongHash; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregationExecutionException; @@ -63,14 +67,20 @@ import org.opensearch.search.aggregations.InternalOrder; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.StarTreePreComputeCollector; import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; import org.opensearch.search.aggregations.bucket.terms.SignificanceLookup.BackgroundFrequencyForBytes; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; +import org.opensearch.search.startree.StarTreeTraversalUtil; +import org.opensearch.search.startree.filter.DimensionFilter; import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.function.BiConsumer; import java.util.function.Function; @@ -85,18 +95,19 @@ * * @opensearch.internal */ -public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggregator { +public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggregator implements StarTreePreComputeCollector { protected final ResultStrategy resultStrategy; protected final ValuesSource.Bytes.WithOrdinals valuesSource; private final LongPredicate acceptedGlobalOrdinals; private final long valueCount; - private final String fieldName; + protected final String fieldName; private Weight weight; protected final CollectionStrategy collectionStrategy; private final SetOnce dvs = new SetOnce<>(); protected int segmentsWithSingleValuedOrds = 0; protected int segmentsWithMultiValuedOrds = 0; + LongUnaryOperator globalOperator; /** * Lookup global ordinals @@ -219,6 +230,9 @@ boolean tryCollectFromTermFrequencies(LeafReaderContext ctx, SortedSetDocValues @Override protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { SortedSetDocValues globalOrds = 
valuesSource.globalOrdinalsValues(ctx); + if (tryStarTreePrecompute(ctx) == true) { + return true; + } if (collectionStrategy instanceof DenseGlobalOrds && this.resultStrategy instanceof StandardTermsResults && subAggregators.length == 0) { @@ -231,6 +245,17 @@ protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws return false; } + protected boolean tryStarTreePrecompute(LeafReaderContext ctx) throws IOException { + CompositeIndexFieldInfo supportedStarTree = StarTreeQueryHelper.getSupportedStarTree(this.context.getQueryShardContext()); + if (supportedStarTree != null) { + globalOperator = valuesSource.globalOrdinalsMapping(ctx); + StarTreeBucketCollector starTreeBucketCollector = getStarTreeBucketCollector(ctx, supportedStarTree, null); + StarTreeQueryHelper.preComputeBucketsWithStarTree(starTreeBucketCollector); + return true; + } + return false; + } + @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { SortedSetDocValues globalOrds = valuesSource.globalOrdinalsValues(ctx); @@ -307,6 +332,56 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }); } + public StarTreeBucketCollector getStarTreeBucketCollector( + LeafReaderContext ctx, + CompositeIndexFieldInfo starTree, + StarTreeBucketCollector parent + ) throws IOException { + assert parent == null; + StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); + SortedSetStarTreeValuesIterator valuesIterator = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator( + fieldName + ); + SortedNumericStarTreeValuesIterator docCountsIterator = StarTreeQueryHelper.getDocCountsIterator(starTreeValues, starTree); + + return new StarTreeBucketCollector( + starTreeValues, + StarTreeTraversalUtil.getStarTreeResult( + starTreeValues, + StarTreeQueryHelper.mergeDimensionFilterIfNotExists( + context.getQueryShardContext().getStarTreeQueryContext().getBaseQueryStarTreeFilter(), + fieldName, + List.of(DimensionFilter.MATCH_ALL_DEFAULT) + ), + context + ) + ) { + @Override + public void setSubCollectors() throws IOException { + for (Aggregator aggregator : subAggregators) { + this.subCollectors.add(((StarTreePreComputeCollector) aggregator).getStarTreeBucketCollector(ctx, starTree, this)); + } + } + + @Override + public void collectStarTreeEntry(int starTreeEntry, long owningBucketOrd) throws IOException { + if (valuesIterator.advanceExact(starTreeEntry) == false) { + return; + } + for (int i = 0, count = valuesIterator.docValueCount(); i < count; i++) { + long dimensionValue = valuesIterator.value(); + long ord = globalOperator.applyAsLong(dimensionValue); + + if (docCountsIterator.advanceExact(starTreeEntry)) { + long metricValue = docCountsIterator.nextValue(); + long bucketOrd = collectionStrategy.globalOrdToBucketOrd(0, ord); + collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); + } + } + } + }; + } + @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { return resultStrategy.buildAggregations(owningBucketOrds); @@ -444,7 +519,7 @@ protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws (ord, docCount) -> incrementBucketDocCount(mapping.applyAsLong(ord), docCount) ); } - return false; + return tryStarTreePrecompute(ctx); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java index 1d78a59a563f0..bcdea9fb4af3c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java @@ -41,7 +41,11 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.LongArray; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; import org.opensearch.index.fielddata.FieldData; +import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -52,6 +56,8 @@ import org.opensearch.search.aggregations.InternalOrder; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.StarTreePreComputeCollector; import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude.LongFilter; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds.BucketOrdsEnum; @@ -60,6 +66,9 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; +import org.opensearch.search.startree.StarTreeTraversalUtil; +import org.opensearch.search.startree.filter.DimensionFilter; import java.io.IOException; import java.math.BigInteger; @@ -79,11 +88,12 @@ * * @opensearch.internal */ -public class NumericTermsAggregator extends TermsAggregator { +public class NumericTermsAggregator extends TermsAggregator implements StarTreePreComputeCollector { private final ResultStrategy resultStrategy; private final ValuesSource.Numeric valuesSource; private final LongKeyedBucketOrds bucketOrds; private final LongFilter longFilter; + private final String fieldName; public NumericTermsAggregator( String name, @@ -105,6 +115,9 @@ public NumericTermsAggregator( this.valuesSource = valuesSource; this.longFilter = longFilter; bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), cardinality); + this.fieldName = (this.valuesSource instanceof ValuesSource.Numeric.FieldData) + ? 
((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName() + : null; } @Override @@ -146,6 +159,73 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }); } + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { + CompositeIndexFieldInfo supportedStarTree = StarTreeQueryHelper.getSupportedStarTree(this.context.getQueryShardContext()); + if (supportedStarTree != null) { + StarTreeBucketCollector starTreeBucketCollector = getStarTreeBucketCollector(ctx, supportedStarTree, null); + StarTreeQueryHelper.preComputeBucketsWithStarTree(starTreeBucketCollector); + return true; + } + return false; + } + + public StarTreeBucketCollector getStarTreeBucketCollector( + LeafReaderContext ctx, + CompositeIndexFieldInfo starTree, + StarTreeBucketCollector parent + ) throws IOException { + assert parent == null; + StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); + SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getDimensionValuesIterator(fieldName); + SortedNumericStarTreeValuesIterator docCountsIterator = StarTreeQueryHelper.getDocCountsIterator(starTreeValues, starTree); + + return new StarTreeBucketCollector( + starTreeValues, + StarTreeTraversalUtil.getStarTreeResult( + starTreeValues, + StarTreeQueryHelper.mergeDimensionFilterIfNotExists( + context.getQueryShardContext().getStarTreeQueryContext().getBaseQueryStarTreeFilter(), + fieldName, + List.of(DimensionFilter.MATCH_ALL_DEFAULT) + ), + context + ) + ) { + @Override + public void setSubCollectors() throws IOException { + for (Aggregator aggregator : subAggregators) { + this.subCollectors.add(((StarTreePreComputeCollector) aggregator).getStarTreeBucketCollector(ctx, starTree, this)); + } + } + + @Override + public void collectStarTreeEntry(int starTreeEntry, long owningBucketOrd) throws IOException { + if (valuesIterator.advanceExact(starTreeEntry) == false) { + return; + } + long dimensionValue = valuesIterator.nextValue(); + // Only numeric & floating points are supported as of now in star-tree + // TODO: Add support for isBigInteger() when it gets supported in star-tree + if (valuesSource.isFloatingPoint()) { + double doubleValue = ((NumberFieldMapper.NumberFieldType) context.mapperService().fieldType(fieldName)).toDoubleValue( + dimensionValue + ); + dimensionValue = NumericUtils.doubleToSortableLong(doubleValue); + } + + for (int i = 0, count = valuesIterator.entryValueCount(); i < count; i++) { + + if (docCountsIterator.advanceExact(starTreeEntry)) { + long metricValue = docCountsIterator.nextValue(); + long bucketOrd = bucketOrds.add(owningBucketOrd, dimensionValue); + collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); + } + } + } + }; + } + @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { return resultStrategy.buildAggregations(owningBucketOrds); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java index 918cc0276ed13..1ea78e08b91af 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -291,6 +291,11 @@ private boolean subAggsNeedScore() { @Override protected boolean shouldDefer(Aggregator aggregator) { - return 
collectMode == SubAggCollectionMode.BREADTH_FIRST && !aggsUsedForSorting.contains(aggregator); + if (context.getQueryShardContext().getStarTreeQueryContext() == null) { + return collectMode == SubAggCollectionMode.BREADTH_FIRST && !aggsUsedForSorting.contains(aggregator); + } else { + // when pre-computing using star-tree - return false (don't defer) for BREADTH_FIRST case + return collectMode != SubAggCollectionMode.BREADTH_FIRST; + } } } diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java index ca0ab9ce52f6e..a8f54f5793551 100644 --- a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java @@ -21,6 +21,7 @@ import org.opensearch.index.query.QueryBuilder; import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory; import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.startree.filter.StarTreeFilter; @@ -113,6 +114,13 @@ public boolean consolidateAllFilters(SearchContext context) { if (validateDateHistogramSupport(compositeMappedFieldType, aggregatorFactory)) { continue; } + + // validation for terms aggregation + if (validateKeywordTermsAggregationSupport(compositeMappedFieldType, aggregatorFactory)) { + continue; + } + + // invalid query shape return false; } @@ -151,6 +159,31 @@ private static boolean validateStarTreeMetricSupport( return false; } + private static boolean validateKeywordTermsAggregationSupport( + CompositeDataCubeFieldType compositeIndexFieldInfo, + AggregatorFactory aggregatorFactory + ) { + if (!(aggregatorFactory instanceof TermsAggregatorFactory termsAggregatorFactory)) { + return false; + } + + // Validate request field is part of dimensions + if (compositeIndexFieldInfo.getDimensions() + .stream() + .map(Dimension::getField) + .noneMatch(termsAggregatorFactory.getField()::equals)) { + return false; + } + + // Validate all sub-factories + for (AggregatorFactory subFactory : aggregatorFactory.getSubFactories().getFactories()) { + if (!validateStarTreeMetricSupport(compositeIndexFieldInfo, subFactory)) { + return false; + } + } + return true; + } + private StarTreeFilter getStarTreeFilter( SearchContext context, QueryBuilder queryBuilder, diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java index 0e3bc220461b9..68a613a373edf 100644 --- a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java @@ -16,9 +16,11 @@ import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import 
org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.StarTreeBucketCollector; import org.opensearch.search.aggregations.support.ValuesSource; @@ -177,6 +179,37 @@ public void collectStarTreeEntry(int starTreeEntryBit, long bucket) throws IOExc }; } + /** + * Fetches the metric values iterator for document counts from StarTreeValues. + */ + public static SortedNumericStarTreeValuesIterator getDocCountsIterator( + StarTreeValues starTreeValues, + CompositeIndexFieldInfo starTree + ) { + String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + starTree.getField(), + DocCountFieldMapper.NAME, + MetricStat.DOC_COUNT.getTypeName() + ); + return (SortedNumericStarTreeValuesIterator) starTreeValues.getMetricValuesIterator(metricName); + } + + /** + * For a StarTreeBucketCollector, get matching star-tree entries and update relevant buckets in aggregator + */ + public static void preComputeBucketsWithStarTree(StarTreeBucketCollector starTreeBucketCollector) throws IOException { + FixedBitSet matchingDocsBitSet = starTreeBucketCollector.getMatchingDocsBitSet(); + int numBits = matchingDocsBitSet.length(); + + if (numBits > 0) { + for (int bit = matchingDocsBitSet.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits) + ? matchingDocsBitSet.nextSetBit(bit + 1) + : DocIdSetIterator.NO_MORE_DOCS) { + starTreeBucketCollector.collectStarTreeEntry(bit, 0); + } + } + } + public static StarTreeFilter mergeDimensionFilterIfNotExists( StarTreeFilter baseStarTreeFilter, String dimensionToMerge, diff --git a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java index 93b133c0302c9..95c877bfce0a8 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java @@ -47,10 +47,12 @@ import org.opensearch.search.aggregations.SearchContextAggregations; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; import org.opensearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.startree.DateHistogramAggregatorTests; +import org.opensearch.search.aggregations.startree.NumericTermsAggregatorTests; import org.opensearch.search.aggregations.startree.StarTreeFilterTests; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.search.builder.SearchSourceBuilder; @@ -70,6 +72,7 @@ import static org.opensearch.search.aggregations.AggregationBuilders.max; import static org.opensearch.search.aggregations.AggregationBuilders.medianAbsoluteDeviation; import static org.opensearch.search.aggregations.AggregationBuilders.sum; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; import static org.mockito.Mockito.mock; @@ -539,6 +542,153 @@ public void testInvalidQueryParsingForDateHistogramAggregations() throws IOExcep 
setStarTreeIndexSetting(null); } + /** + * Test query parsing for bucket aggregations, with/without numeric term query + */ + public void testQueryParsingForBucketAggregations() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + setStarTreeIndexSetting("true"); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) + .build(); + CreateIndexRequestBuilder builder = client().admin() + .indices() + .prepareCreate("test") + .setSettings(settings) + .setMapping(NumericTermsAggregatorTests.getExpandedMapping(1, false)); + createIndex("test", builder); + + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); + IndexShard indexShard = indexService.getShard(0); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(true), + indexShard.shardId(), + 1, + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, + -1, + null, + null + ); + String KEYWORD_FIELD = "clientip"; + String NUMERIC_FIELD = "size"; + + MaxAggregationBuilder maxAggNoSub = max("max").field(FIELD_NAME); + MaxAggregationBuilder sumAggNoSub = max("sum").field(FIELD_NAME); + SumAggregationBuilder sumAggSub = sum("sum").field(FIELD_NAME).subAggregation(maxAggNoSub); + MedianAbsoluteDeviationAggregationBuilder medianAgg = medianAbsoluteDeviation("median").field(FIELD_NAME); + + QueryBuilder baseQuery; + SearchContext searchContext = createSearchContext(indexService); + StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration( + 1, + Collections.emptySet(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ); + + // Case 1: MatchAllQuery and non-nested metric aggregations is nested within keyword term aggregation, should use star tree + TermsAggregationBuilder termsAggregationBuilder = terms("term").field(KEYWORD_FIELD).subAggregation(maxAggNoSub); + baseQuery = new MatchAllQueryBuilder(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(termsAggregationBuilder); + + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); + + // Case 2: MatchAllQuery and non-nested metric aggregations is nested within numeric term aggregation, should use star tree + termsAggregationBuilder = terms("term").field(NUMERIC_FIELD).subAggregation(maxAggNoSub); + sourceBuilder = new SearchSourceBuilder().size(0).query(new MatchAllQueryBuilder()).aggregation(termsAggregationBuilder); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); + + // Case 3: NumericTermsQuery and non-nested metric 
aggregations is nested within keyword term aggregation, should use star tree + termsAggregationBuilder = terms("term").field(KEYWORD_FIELD).subAggregation(maxAggNoSub); + baseQuery = new TermQueryBuilder(FIELD_NAME, 1); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(termsAggregationBuilder); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD), new NumericDimension(FIELD_NAME)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); + + // Case 4: NumericTermsQuery and multiple non-nested metric aggregations is within numeric term aggregation, should use star tree + termsAggregationBuilder = terms("term").field(NUMERIC_FIELD).subAggregation(maxAggNoSub).subAggregation(sumAggNoSub); + sourceBuilder = new SearchSourceBuilder().size(0).query(new TermQueryBuilder(FIELD_NAME, 1)).aggregation(termsAggregationBuilder); + + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD), new NumericDimension(FIELD_NAME)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); + + // Case 5: Nested metric aggregations is nested within numeric term aggregation, should not use star tree + termsAggregationBuilder = terms("term").field(NUMERIC_FIELD).subAggregation(sumAggSub); + sourceBuilder = new SearchSourceBuilder().size(0).query(new TermQueryBuilder(FIELD_NAME, 1)).aggregation(termsAggregationBuilder); + assertStarTreeContext(request, sourceBuilder, null, -1); + + // Case 6: Unsupported aggregations is nested within numeric term aggregation, should not use star tree + termsAggregationBuilder = terms("term").field(NUMERIC_FIELD).subAggregation(medianAgg); + sourceBuilder = new SearchSourceBuilder().size(0).query(new TermQueryBuilder(FIELD_NAME, 1)).aggregation(termsAggregationBuilder); + assertStarTreeContext(request, sourceBuilder, null, -1); + + setStarTreeIndexSetting(null); + } + private void setStarTreeIndexSetting(String value) { client().admin() .cluster() diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java new file mode 100644 index 0000000000000..2ca9f6b592a0d --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java @@ -0,0 +1,245 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.startree; + +import com.carrotsearch.randomizedtesting.RandomizedTest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene101.Lucene101Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.composite101.Composite101Codec; +import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; +import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.bucket.terms.InternalTerms; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Random; + +import static org.opensearch.search.aggregations.AggregationBuilders.avg; +import static org.opensearch.search.aggregations.AggregationBuilders.count; +import static org.opensearch.search.aggregations.AggregationBuilders.max; +import static org.opensearch.search.aggregations.AggregationBuilders.min; +import static org.opensearch.search.aggregations.AggregationBuilders.sum; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; +import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; + +public class KeywordTermsAggregatorTests extends AggregatorTestCase { + final static String STATUS = "status"; + final static String SIZE = "size"; + final static String CLIENTIP = "clientip"; + private static final MappedFieldType STATUS_FIELD_TYPE = new NumberFieldMapper.NumberFieldType( + STATUS, + NumberFieldMapper.NumberType.LONG + ); + private static final MappedFieldType SIZE_FIELD_NAME 
= new NumberFieldMapper.NumberFieldType(SIZE, NumberFieldMapper.NumberType.FLOAT); + private static final MappedFieldType CLIENTIP_FIELD_NAME = new KeywordFieldMapper.KeywordFieldType(CLIENTIP); + + @Before + public void setup() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + } + + @After + public void teardown() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } + + protected Codec getCodec() { + final Logger testLogger = LogManager.getLogger(KeywordTermsAggregatorTests.class); + MapperService mapperService; + try { + mapperService = StarTreeDocValuesFormatTests.createMapperService(NumericTermsAggregatorTests.getExpandedMapping(1, false)); + } catch (IOException e) { + throw new RuntimeException(e); + } + return new Composite101Codec(Lucene101Codec.Mode.BEST_SPEED, mapperService, testLogger); + } + + public void testStarTreeKeywordTerms() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setCodec(getCodec()); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + + Random random = RandomizedTest.getRandom(); + int totalDocs = 100; + + long val; + + List docs = new ArrayList<>(); + // Index 100 random documents + for (int i = 0; i < totalDocs; i++) { + Document doc = new Document(); + if (random.nextBoolean()) { + val = random.nextInt(10); // Random int between 0 and 9 for status + doc.add(new SortedNumericDocValuesField(STATUS, val)); + } + if (random.nextBoolean()) { + val = NumericUtils.doubleToSortableLong(random.nextInt(100) + 0.5f); + doc.add(new SortedNumericDocValuesField(SIZE, val)); + } + if (random.nextBoolean()) { + val = random.nextInt(10); // Random strings for int between 0 and 9 for clientip + doc.add(new SortedSetDocValuesField(CLIENTIP, new BytesRef(String.valueOf(val)))); + doc.add(new StringField(CLIENTIP, String.valueOf(val), Field.Store.NO)); + } + iw.addDocument(doc); + docs.add(doc); + } + + if (randomBoolean()) { + iw.forceMerge(1); + } + iw.close(); + DirectoryReader ir = DirectoryReader.open(directory); + LeafReaderContext context = ir.leaves().get(0); + + SegmentReader reader = Lucene.segmentReader(context.reader()); + IndexSearcher indexSearcher = newSearcher(wrapInMockESDirectoryReader(ir), false, false); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + CompositeIndexFieldInfo starTree = compositeIndexFields.get(0); + + LinkedHashMap supportedDimensions = new LinkedHashMap<>(); + supportedDimensions.put(new NumericDimension(STATUS), STATUS_FIELD_TYPE); + supportedDimensions.put(new NumericDimension(SIZE), SIZE_FIELD_NAME); + supportedDimensions.put(new OrdinalDimension(CLIENTIP), CLIENTIP_FIELD_NAME); + + Query query = new MatchAllDocsQuery(); + QueryBuilder queryBuilder = null; + TermsAggregationBuilder termsAggregationBuilder = terms("terms_agg").field(CLIENTIP) + .collectMode(Aggregator.SubAggCollectionMode.BREADTH_FIRST); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + ValuesSourceAggregationBuilder[] aggBuilders = { + sum("_sum").field(SIZE), + max("_max").field(SIZE), + min("_min").field(SIZE), + count("_count").field(SIZE), + avg("_avg").field(SIZE) }; + + for (ValuesSourceAggregationBuilder aggregationBuilder : aggBuilders) { + 
query = new MatchAllDocsQuery(); + queryBuilder = null; + + termsAggregationBuilder = terms("terms_agg").field(CLIENTIP) + .subAggregation(aggregationBuilder) + .collectMode(Aggregator.SubAggCollectionMode.BREADTH_FIRST); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + // Numeric-terms query with keyword terms aggregation + for (int cases = 0; cases < 100; cases++) { + // query of status field + String queryField = STATUS; + long queryValue = random.nextInt(10); + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + // query on size field + queryField = SIZE; + queryValue = NumericUtils.floatToSortableInt(random.nextInt(20) - 14.5f); + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + } + } + ir.close(); + directory.close(); + } + + private void testCase( + IndexSearcher indexSearcher, + Query query, + QueryBuilder queryBuilder, + TermsAggregationBuilder termsAggregationBuilder, + CompositeIndexFieldInfo starTree, + LinkedHashMap supportedDimensions + ) throws IOException { + InternalTerms starTreeAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + termsAggregationBuilder, + starTree, + supportedDimensions, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + true, + STATUS_FIELD_TYPE, + SIZE_FIELD_NAME, + CLIENTIP_FIELD_NAME + ); + + InternalTerms defaultAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + termsAggregationBuilder, + null, + null, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + false, + STATUS_FIELD_TYPE, + SIZE_FIELD_NAME, + CLIENTIP_FIELD_NAME + ); + + assertEquals(defaultAggregation.getBuckets().size(), starTreeAggregation.getBuckets().size()); + assertEquals(defaultAggregation.getBuckets(), starTreeAggregation.getBuckets()); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java new file mode 100644 index 0000000000000..d3cb2d17e7c16 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java @@ -0,0 +1,342 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.startree; + +import com.carrotsearch.randomizedtesting.RandomizedTest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene101.Lucene101Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.composite101.Composite101Codec; +import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.bucket.terms.InternalTerms; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Random; + +import static org.opensearch.index.codec.composite912.datacube.startree.AbstractStarTreeDVFormatTests.topMapping; +import static org.opensearch.search.aggregations.AggregationBuilders.avg; +import static org.opensearch.search.aggregations.AggregationBuilders.count; +import static org.opensearch.search.aggregations.AggregationBuilders.max; +import static org.opensearch.search.aggregations.AggregationBuilders.min; +import static org.opensearch.search.aggregations.AggregationBuilders.sum; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; +import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; + +public class NumericTermsAggregatorTests extends AggregatorTestCase { + final static String STATUS = "status"; + final static String SIZE = "size"; + private static final MappedFieldType STATUS_FIELD_TYPE = new NumberFieldMapper.NumberFieldType( + STATUS, + NumberFieldMapper.NumberType.LONG + ); + private static final MappedFieldType SIZE_FIELD_NAME = new NumberFieldMapper.NumberFieldType(SIZE, NumberFieldMapper.NumberType.FLOAT); + + @Before + public void setup() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + } + + @After + 
public void teardown() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } + + protected Codec getCodec() { + final Logger testLogger = LogManager.getLogger(NumericTermsAggregatorTests.class); + MapperService mapperService; + try { + mapperService = StarTreeDocValuesFormatTests.createMapperService(getExpandedMapping(1, false)); + } catch (IOException e) { + throw new RuntimeException(e); + } + return new Composite101Codec(Lucene101Codec.Mode.BEST_SPEED, mapperService, testLogger); + } + + public void testStarTreeNumericTerms() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setCodec(getCodec()); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + + Random random = RandomizedTest.getRandom(); + int totalDocs = 100; + + long val; + + List docs = new ArrayList<>(); + // Index 100 random documents + for (int i = 0; i < totalDocs; i++) { + Document doc = new Document(); + if (random.nextBoolean()) { + val = random.nextInt(10); // Random int between (0 and 9) for status + doc.add(new SortedNumericDocValuesField(STATUS, val)); + } + if (random.nextBoolean()) { + val = NumericUtils.doubleToSortableLong(random.nextInt(100) + 0.5f); + // Random float between (0 and 99)+0.5f for size + doc.add(new SortedNumericDocValuesField(SIZE, val)); + } + iw.addDocument(doc); + docs.add(doc); + } + + if (randomBoolean()) { + iw.forceMerge(1); + } + iw.close(); + DirectoryReader ir = DirectoryReader.open(directory); + LeafReaderContext context = ir.leaves().get(0); + + SegmentReader reader = Lucene.segmentReader(context.reader()); + IndexSearcher indexSearcher = newSearcher(reader, false, false); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + CompositeIndexFieldInfo starTree = compositeIndexFields.get(0); + + LinkedHashMap supportedDimensions = new LinkedHashMap<>(); + supportedDimensions.put(new NumericDimension(STATUS), STATUS_FIELD_TYPE); + supportedDimensions.put(new NumericDimension(SIZE), SIZE_FIELD_NAME); + + Query query = new MatchAllDocsQuery(); + QueryBuilder queryBuilder = null; + TermsAggregationBuilder termsAggregationBuilder = terms("terms_agg").field(STATUS); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + ValuesSourceAggregationBuilder[] aggBuilders = { + sum("_sum").field(SIZE), + max("_max").field(SIZE), + min("_min").field(SIZE), + count("_count").field(SIZE), + avg("_avg").field(SIZE) }; + + for (ValuesSourceAggregationBuilder aggregationBuilder : aggBuilders) { + query = new MatchAllDocsQuery(); + queryBuilder = null; + termsAggregationBuilder = terms("terms_agg").field(STATUS).subAggregation(aggregationBuilder); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + // Numeric-terms query with numeric terms aggregation + for (int cases = 0; cases < 100; cases++) { + + // query of status field + String queryField = STATUS; + long queryValue = random.nextInt(10); + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + + // query on size field + queryField = SIZE; + queryValue = 
NumericUtils.floatToSortableInt(random.nextInt(20) - 14.5f); + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + } + } + + aggBuilders = new ValuesSourceAggregationBuilder[] { + sum("_sum").field(STATUS), + max("_max").field(STATUS), + min("_min").field(STATUS), + count("_count").field(STATUS), + avg("_avg").field(STATUS) }; + + for (ValuesSourceAggregationBuilder aggregationBuilder : aggBuilders) { + query = new MatchAllDocsQuery(); + queryBuilder = null; + + termsAggregationBuilder = terms("terms_agg").field(SIZE).subAggregation(aggregationBuilder); + testCase(indexSearcher, query, queryBuilder, termsAggregationBuilder, starTree, supportedDimensions); + } + + ir.close(); + directory.close(); + } + + private void testCase( + IndexSearcher indexSearcher, + Query query, + QueryBuilder queryBuilder, + TermsAggregationBuilder termsAggregationBuilder, + CompositeIndexFieldInfo starTree, + LinkedHashMap supportedDimensions + ) throws IOException { + InternalTerms starTreeAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + termsAggregationBuilder, + starTree, + supportedDimensions, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + true, + STATUS_FIELD_TYPE, + SIZE_FIELD_NAME + ); + + InternalTerms defaultAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + termsAggregationBuilder, + null, + null, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + false, + STATUS_FIELD_TYPE, + SIZE_FIELD_NAME + ); + + assertEquals(defaultAggregation.getBuckets().size(), starTreeAggregation.getBuckets().size()); + assertEquals(defaultAggregation.getBuckets(), starTreeAggregation.getBuckets()); + } + + public static XContentBuilder getExpandedMapping(int maxLeafDocs, boolean skipStarNodeCreationForStatusDimension) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree1"); // Use the same name as the provided mapping + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", maxLeafDocs); + if (skipStarNodeCreationForStatusDimension) { + b.startArray("skip_star_node_creation_for_dimensions"); + b.value("status"); // Skip for "status" dimension + b.endArray(); + } + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "status"); + b.endObject(); + b.startObject(); + b.field("name", "size"); + b.endObject(); + b.startObject(); + b.field("name", "clientip"); + b.endObject(); + b.startObject(); + b.field("name", "@timestamp"); + b.startArray("calendar_intervals"); + b.value("month"); + b.value("day"); + b.endArray(); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", "size"); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.value("min"); + b.value("max"); + b.endArray(); + b.endObject(); + b.startObject(); + b.field("name", "status"); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.value("min"); + b.value("max"); + b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.field("format", "strict_date_optional_time||epoch_second"); + b.endObject(); + b.startObject("message"); + b.field("type", 
"keyword"); + b.field("index", false); + b.field("doc_values", false); + b.endObject(); + b.startObject("clientip"); + b.field("type", "keyword"); + b.endObject(); + b.startObject("request"); + b.field("type", "text"); + b.startObject("fields"); + b.startObject("raw"); + b.field("type", "keyword"); + b.field("ignore_above", 256); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.startObject("size"); + b.field("type", "float"); + b.endObject(); + b.startObject("geoip"); + b.startObject("properties"); + b.startObject("country_name"); + b.field("type", "keyword"); + b.endObject(); + b.startObject("city_name"); + b.field("type", "keyword"); + b.endObject(); + b.startObject("location"); + b.field("type", "geo_point"); + b.endObject(); + b.endObject(); + b.endObject(); + b.endObject(); + }); + } +} diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index eba1769ad882d..df982d4f0c7f3 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -168,6 +168,7 @@ import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; @@ -441,6 +442,8 @@ protected SearchContext createSearchContextWithStarTreeContext( searchContext.getQueryShardContext().setStarTreeQueryContext(starTreeQueryContext); } + Stream.of(fieldTypes).forEach(fieldType -> when(mapperService.fieldType(fieldType.name())).thenReturn(fieldType)); + return searchContext; } From f6d6aa61e5039e4c6143cc25a71c3e448572dd33 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Tue, 4 Mar 2025 10:44:36 +0530 Subject: [PATCH 045/550] Fix flaky test RemoteIndexRecoveryIT.testRerouteRecovery (#17228) Signed-off-by: Sachin Kale --- .../indices/recovery/IndexRecoveryIT.java | 4 ++-- .../remotestore/RemoteIndexRecoveryIT.java | 24 +++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index d30806b4325ac..9d893cb6f33c7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -250,7 +250,7 @@ private void assertOnGoingRecoveryState( assertThat(state.getStage(), not(equalTo(Stage.DONE))); } - private void slowDownRecovery(ByteSizeValue shardSize) { + public void slowDownRecovery(ByteSizeValue shardSize) { long chunkSize = Math.max(1, shardSize.getBytes() / 10); assertTrue( client().admin() @@ -528,7 +528,7 @@ public void testRerouteRecovery() throws Exception { assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), equalTo(1)); indicesService = internalCluster().getInstance(IndicesService.class, nodeB); assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), equalTo(1)); - }, TimeValue.timeValueSeconds(10), TimeValue.timeValueMillis(500)); + }, TimeValue.timeValueSeconds(60), TimeValue.timeValueMillis(500)); 
logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index 6de61cf203c60..1961b0fa43705 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -10,9 +10,12 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.indices.recovery.IndexRecoveryIT; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matcher; @@ -22,6 +25,7 @@ import java.nio.file.Path; +import static org.opensearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE_SETTING; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -54,6 +58,26 @@ public Settings indexSettings() { .build(); } + @Override + public void slowDownRecovery(ByteSizeValue shardSize) { + logger.info("--> shardSize: " + shardSize); + long chunkSize = Math.max(1, shardSize.getBytes() / 50); + assertTrue( + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + // one chunk per sec.. 
+ .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES) + // small chunks + .put(INDICES_RECOVERY_CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES)) + ) + .get() + .isAcknowledged() + ); + } + @After public void teardown() { clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); From 2e4cc8c6e12f0e5fdfe9274da0126e81f95f59b3 Mon Sep 17 00:00:00 2001 From: Fen Qin <75345540+fen-qin@users.noreply.github.com> Date: Tue, 4 Mar 2025 14:37:02 -0800 Subject: [PATCH 046/550] Fix explain action on query rewrite (#17286) (#17286) Signed-off-by: Fen Qin --- CHANGELOG-3.0.md | 1 + .../opensearch/explain/ExplainActionIT.java | 24 +++++++++++++++++++ .../explain/TransportExplainAction.java | 17 ++++++++++++- 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 7211368c65ffb..62d55d40f4bb7 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -77,6 +77,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix swapped field formats in nodes API where `total_indexing_buffer_in_bytes` and `total_indexing_buffer` values were reversed ([#17070](https://github.com/opensearch-project/OpenSearch/pull/17070)) - Add HTTP/2 protocol support to HttpRequest.HttpVersion ([#17248](https://github.com/opensearch-project/OpenSearch/pull/17248)) - Fix missing bucket in terms aggregation with missing value ([#17418](https://github.com/opensearch-project/OpenSearch/pull/17418)) +- Fix explain action on query rewrite ([#17286](https://github.com/opensearch-project/OpenSearch/pull/17286)) ### Security diff --git a/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java b/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java index 2949fa34a0795..723ff803851d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java @@ -40,6 +40,8 @@ import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.indices.TermsLookup; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.ByteArrayInputStream; @@ -52,6 +54,7 @@ import java.util.Set; import static java.util.Collections.singleton; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -305,4 +308,25 @@ public void testStreamExplain() throws Exception { result = Lucene.readExplanation(esBuffer); assertThat(exp.toString(), equalTo(result.toString())); } + + public void testQueryRewrite() { + client().admin() + .indices() + .prepareCreate("twitter") + .setMapping("user", "type=integer", "followers", "type=integer") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)) + .get(); + client().prepareIndex("twitter").setId("1").setSource("followers", new int[] { 1, 2, 3 }).get(); + refresh(); + + TermsQueryBuilder termsLookupQuery = QueryBuilders.termsLookupQuery("user", new TermsLookup("twitter", "1", "followers")); + 
ExplainResponse response = client().prepareExplain("twitter", "1").setQuery(termsLookupQuery).get(); + assertNotNull(response); + assertTrue(response.isExists()); + assertFalse(response.isMatch()); + assertThat(response.getIndex(), equalTo("twitter")); + assertThat(response.getId(), equalTo("1")); + assertNotNull(response.getExplanation()); + assertFalse(response.getExplanation().isMatch()); + } } diff --git a/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java index fb2ccc6ebbf12..710fb46ce7328 100644 --- a/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java @@ -52,6 +52,8 @@ import org.opensearch.index.get.GetResult; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.Uid; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.Rewriteable; import org.opensearch.index.shard.IndexShard; import org.opensearch.search.SearchService; import org.opensearch.search.internal.AliasFilter; @@ -101,7 +103,20 @@ public TransportExplainAction( @Override protected void doExecute(Task task, ExplainRequest request, ActionListener listener) { request.nowInMillis = System.currentTimeMillis(); - super.doExecute(task, request, listener); + // if there's no query we can't rewrite it + if (request.query() == null) { + super.doExecute(task, request, listener); + return; + } + ActionListener rewriteListener = ActionListener.wrap(rewrittenQuery -> { + request.query(rewrittenQuery); + super.doExecute(task, request, listener); + }, listener::onFailure); + Rewriteable.rewriteAndFetch( + request.query(), + searchService.getIndicesService().getRewriteContext(() -> request.nowInMillis), + rewriteListener + ); } @Override From 17363d5bb46584e7dcd4dea218c05dd37a563dbe Mon Sep 17 00:00:00 2001 From: Varun Bharadwaj Date: Tue, 4 Mar 2025 15:04:48 -0800 Subject: [PATCH 047/550] [Pull-based Ingestion] Add error handling strategy to pull-based ingestion (#17427) * Add error handling strategy to pull-based ingestion Signed-off-by: Varun Bharadwaj * Make error strategy config type-safe Signed-off-by: Varun Bharadwaj --------- Signed-off-by: Varun Bharadwaj --- CHANGELOG-3.0.md | 1 + .../plugin/kafka/KafkaPartitionConsumer.java | 5 + .../cluster/metadata/IndexMetadata.java | 14 +- .../cluster/metadata/IngestionSource.java | 31 +++- .../common/settings/IndexScopedSettings.java | 1 + .../index/IngestionShardConsumer.java | 5 + .../index/engine/IngestionEngine.java | 20 ++- .../BlockIngestionErrorStrategy.java | 36 +++++ .../pollingingest/DefaultStreamPoller.java | 30 +++- .../DropIngestionErrorStrategy.java | 37 +++++ .../pollingingest/IngestionErrorStrategy.java | 68 +++++++++ .../MessageProcessorRunnable.java | 21 ++- .../metadata/IngestionSourceTests.java | 24 +-- .../index/engine/FakeIngestionSource.java | 5 + .../DefaultStreamPollerTests.java | 137 +++++++++++++++++- 15 files changed, 398 insertions(+), 37 deletions(-) create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 62d55d40f4bb7..7e82efd268007 100644 --- a/CHANGELOG-3.0.md 
+++ b/CHANGELOG-3.0.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) - Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) - [Star Tree] [Search] Resolving keyword & numeric bucket aggregation with metric aggregation using star-tree ([#17165](https://github.com/opensearch-project/OpenSearch/pull/17165)) +- Added error handling support for the pull-based ingestion ([#17427](https://github.com/opensearch-project/OpenSearch/pull/17427)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java index 9461cfbc2de98..c749a887a2ccb 100644 --- a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java +++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java @@ -132,6 +132,11 @@ public KafkaOffset nextPointer() { return new KafkaOffset(lastFetchedOffset + 1); } + @Override + public KafkaOffset nextPointer(KafkaOffset pointer) { + return new KafkaOffset(pointer.getOffset() + 1); + } + @Override public IngestionShardPointer earliestPointer() { long startOffset = AccessController.doPrivileged( diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index cabea0efe8433..e9bd3b74404b1 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -71,6 +71,7 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.indices.pollingingest.IngestionErrorStrategy; import org.opensearch.indices.pollingingest.StreamPoller; import org.opensearch.indices.replication.SegmentReplicationSource; import org.opensearch.indices.replication.common.ReplicationType; @@ -770,6 +771,15 @@ public Iterator> settings() { Property.Final ); + public static final String SETTING_INGESTION_SOURCE_ERROR_STRATEGY = "index.ingestion_source.error_strategy"; + public static final Setting INGESTION_SOURCE_ERROR_STRATEGY_SETTING = new Setting<>( + SETTING_INGESTION_SOURCE_ERROR_STRATEGY, + IngestionErrorStrategy.ErrorStrategy.DROP.name(), + IngestionErrorStrategy.ErrorStrategy::parseFromString, + (errorStrategy) -> {}, + Property.IndexScope + ); + public static final Setting.AffixSetting INGESTION_SOURCE_PARAMS_SETTING = Setting.prefixKeySetting( "index.ingestion_source.param.", key -> new Setting<>(key, "", (value) -> { @@ -1004,8 +1014,10 @@ public IngestionSource getIngestionSource() { pointerInitResetType, pointerInitResetValue ); + + final IngestionErrorStrategy.ErrorStrategy errorStrategy = INGESTION_SOURCE_ERROR_STRATEGY_SETTING.get(settings); final Map ingestionSourceParams = INGESTION_SOURCE_PARAMS_SETTING.getAsMap(settings); - return new IngestionSource(ingestionSourceType, pointerInitReset, ingestionSourceParams); + return new IngestionSource(ingestionSourceType, pointerInitReset, 
errorStrategy, ingestionSourceParams); } return null; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java index 9849c0a5f2ba9..fd28acf3246ad 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java @@ -9,6 +9,7 @@ package org.opensearch.cluster.metadata; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.indices.pollingingest.IngestionErrorStrategy; import org.opensearch.indices.pollingingest.StreamPoller; import java.util.Map; @@ -21,12 +22,19 @@ public class IngestionSource { private String type; private PointerInitReset pointerInitReset; + private IngestionErrorStrategy.ErrorStrategy errorStrategy; private Map params; - public IngestionSource(String type, PointerInitReset pointerInitReset, Map params) { + public IngestionSource( + String type, + PointerInitReset pointerInitReset, + IngestionErrorStrategy.ErrorStrategy errorStrategy, + Map params + ) { this.type = type; this.pointerInitReset = pointerInitReset; this.params = params; + this.errorStrategy = errorStrategy; } public String getType() { @@ -37,6 +45,10 @@ public PointerInitReset getPointerInitReset() { return pointerInitReset; } + public IngestionErrorStrategy.ErrorStrategy getErrorStrategy() { + return errorStrategy; + } + public Map params() { return params; } @@ -48,17 +60,30 @@ public boolean equals(Object o) { IngestionSource ingestionSource = (IngestionSource) o; return Objects.equals(type, ingestionSource.type) && Objects.equals(pointerInitReset, ingestionSource.pointerInitReset) + && Objects.equals(errorStrategy, ingestionSource.errorStrategy) && Objects.equals(params, ingestionSource.params); } @Override public int hashCode() { - return Objects.hash(type, pointerInitReset, params); + return Objects.hash(type, pointerInitReset, params, errorStrategy); } @Override public String toString() { - return "IngestionSource{" + "type='" + type + '\'' + ",pointer_init_reset='" + pointerInitReset + '\'' + ", params=" + params + '}'; + return "IngestionSource{" + + "type='" + + type + + '\'' + + ",pointer_init_reset='" + + pointerInitReset + + '\'' + + ",error_strategy='" + + errorStrategy + + '\'' + + ", params=" + + params + + '}'; } /** diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index dc77ffd720bad..12bee5cd14f57 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -266,6 +266,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetadata.INGESTION_SOURCE_POINTER_INIT_RESET_SETTING, IndexMetadata.INGESTION_SOURCE_POINTER_INIT_RESET_VALUE_SETTING, IndexMetadata.INGESTION_SOURCE_PARAMS_SETTING, + IndexMetadata.INGESTION_SOURCE_ERROR_STRATEGY_SETTING, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { diff --git a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java index 41e659196a612..a9ffcaca850f2 100644 --- a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java +++ b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java @@ -72,6 
+72,11 @@ public M getMessage() { */ T nextPointer(); + /** + * @return the immediate next pointer from the provided start pointer + */ + T nextPointer(T startPointer); + /** * @return the earliest pointer in the shard */ diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index 00feab082c178..b919e15b56211 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -29,6 +29,7 @@ import org.opensearch.index.translog.TranslogStats; import org.opensearch.index.translog.listener.CompositeTranslogEventListener; import org.opensearch.indices.pollingingest.DefaultStreamPoller; +import org.opensearch.indices.pollingingest.IngestionErrorStrategy; import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.indices.pollingingest.StreamPoller; @@ -99,12 +100,21 @@ public void start() { } String resetValue = ingestionSource.getPointerInitReset().getValue(); - streamPoller = new DefaultStreamPoller(startPointer, persistedPointers, ingestionShardConsumer, this, resetState, resetValue); + IngestionErrorStrategy ingestionErrorStrategy = IngestionErrorStrategy.create( + ingestionSource.getErrorStrategy(), + ingestionSource.getType() + ); - // Poller is only started on the primary shard. Replica shards will rely on segment replication. - if (!engineConfig.isReadOnlyReplica()) { - streamPoller.start(); - } + streamPoller = new DefaultStreamPoller( + startPointer, + persistedPointers, + ingestionShardConsumer, + this, + resetState, + resetValue, + ingestionErrorStrategy + ); + streamPoller.start(); } protected Set fetchPersistedOffsets(DirectoryReader directoryReader, IngestionShardPointer batchStart) diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java new file mode 100644 index 0000000000000..d0febd0909be2 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.pollingingest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +/** + * This error handling strategy blocks on failures preventing processing of remaining updates in the ingestion source. 
+ */ +public class BlockIngestionErrorStrategy implements IngestionErrorStrategy { + private static final Logger logger = LogManager.getLogger(BlockIngestionErrorStrategy.class); + private final String ingestionSource; + + public BlockIngestionErrorStrategy(String ingestionSource) { + this.ingestionSource = ingestionSource; + } + + @Override + public void handleError(Throwable e, ErrorStage stage) { + logger.error("Error processing update from {}: {}", ingestionSource, e); + + // todo: record blocking update and emit metrics + } + + @Override + public boolean shouldPauseIngestion(Throwable e, ErrorStage stage) { + return true; + } +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java index 3dfd77f75c82d..6fe010504f1a8 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -67,21 +67,25 @@ public class DefaultStreamPoller implements StreamPoller { @Nullable private IngestionShardPointer maxPersistedPointer; + private IngestionErrorStrategy errorStrategy; + public DefaultStreamPoller( IngestionShardPointer startPointer, Set persistedPointers, IngestionShardConsumer consumer, IngestionEngine ingestionEngine, ResetState resetState, - String resetValue + String resetValue, + IngestionErrorStrategy errorStrategy ) { this( startPointer, persistedPointers, consumer, - new MessageProcessorRunnable(new ArrayBlockingQueue<>(100), ingestionEngine), + new MessageProcessorRunnable(new ArrayBlockingQueue<>(100), ingestionEngine, errorStrategy), resetState, - resetValue + resetValue, + errorStrategy ); } @@ -91,7 +95,8 @@ public DefaultStreamPoller( IngestionShardConsumer consumer, MessageProcessorRunnable processorRunnable, ResetState resetState, - String resetValue + String resetValue, + IngestionErrorStrategy errorStrategy ) { this.consumer = Objects.requireNonNull(consumer); this.resetState = resetState; @@ -117,6 +122,7 @@ public DefaultStreamPoller( String.format(Locale.ROOT, "stream-poller-processor-%d-%d", consumer.getShardId(), System.currentTimeMillis()) ) ); + this.errorStrategy = errorStrategy; } @Override @@ -141,6 +147,9 @@ protected void startPoll() { } logger.info("Starting poller for shard {}", consumer.getShardId()); + // track the last record successfully written to the blocking queue + IngestionShardPointer lastSuccessfulPointer = null; + while (true) { try { if (closed) { @@ -209,6 +218,7 @@ protected void startPoll() { } totalPolledCount.inc(); blockingQueue.put(result); + lastSuccessfulPointer = result.getPointer(); logger.debug( "Put message {} with pointer {} to the blocking queue", String.valueOf(result.getMessage().getPayload()), @@ -218,8 +228,18 @@ protected void startPoll() { // update the batch start pointer to the next batch batchStartPointer = consumer.nextPointer(); } catch (Throwable e) { - // TODO better error handling logger.error("Error in polling the shard {}: {}", consumer.getShardId(), e); + errorStrategy.handleError(e, IngestionErrorStrategy.ErrorStage.POLLING); + + if (errorStrategy.shouldPauseIngestion(e, IngestionErrorStrategy.ErrorStage.POLLING)) { + // Blocking error encountered. Pause poller to stop processing remaining updates. + pause(); + } else { + // Advance the batch start pointer to ignore the error and continue from next record + batchStartPointer = lastSuccessfulPointer == null + ? 
consumer.nextPointer(batchStartPointer) + : consumer.nextPointer(lastSuccessfulPointer); + } } } } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java new file mode 100644 index 0000000000000..4598bf1248cfd --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.pollingingest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +/** + * This error handling strategy drops failures and proceeds with remaining updates in the ingestion source. + */ +public class DropIngestionErrorStrategy implements IngestionErrorStrategy { + private static final Logger logger = LogManager.getLogger(DropIngestionErrorStrategy.class); + private final String ingestionSource; + + public DropIngestionErrorStrategy(String ingestionSource) { + this.ingestionSource = ingestionSource; + } + + @Override + public void handleError(Throwable e, ErrorStage stage) { + logger.error("Error processing update from {}: {}", ingestionSource, e); + + // todo: record failed update stats and emit metrics + } + + @Override + public boolean shouldPauseIngestion(Throwable e, ErrorStage stage) { + return false; + } + +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java new file mode 100644 index 0000000000000..a6e992a460cc1 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.pollingingest; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Locale; + +/** + * Defines the error handling strategy when an error is encountered either during polling records from ingestion source + * or during processing the polled records. + */ +@ExperimentalApi +public interface IngestionErrorStrategy { + + /** + * Process and record the error. + */ + void handleError(Throwable e, ErrorStage stage); + + /** + * Indicates if ingestion must be paused, blocking further writes. 
+ */ + boolean shouldPauseIngestion(Throwable e, ErrorStage stage); + + static IngestionErrorStrategy create(ErrorStrategy errorStrategy, String ingestionSource) { + switch (errorStrategy) { + case BLOCK: + return new BlockIngestionErrorStrategy(ingestionSource); + case DROP: + default: + return new DropIngestionErrorStrategy(ingestionSource); + } + } + + /** + * Indicates available error handling strategies + */ + @ExperimentalApi + enum ErrorStrategy { + DROP, + BLOCK; + + public static ErrorStrategy parseFromString(String errorStrategy) { + try { + return ErrorStrategy.valueOf(errorStrategy.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Invalid ingestion errorStrategy: " + errorStrategy, e); + } + } + } + + /** + * Indicates different stages of encountered errors + */ + @ExperimentalApi + enum ErrorStage { + POLLING, + PROCESSING + } + +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java index 0c06ebc558466..0ac791e60de5a 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java @@ -50,6 +50,7 @@ public class MessageProcessorRunnable implements Runnable { private final BlockingQueue> blockingQueue; private final MessageProcessor messageProcessor; private final CounterMetric stats = new CounterMetric(); + private IngestionErrorStrategy errorStrategy; private static final String ID = "_id"; private static final String OP_TYPE = "_op_type"; @@ -63,9 +64,10 @@ public class MessageProcessorRunnable implements Runnable { */ public MessageProcessorRunnable( BlockingQueue> blockingQueue, - IngestionEngine engine + IngestionEngine engine, + IngestionErrorStrategy errorStrategy ) { - this(blockingQueue, new MessageProcessor(engine)); + this(blockingQueue, new MessageProcessor(engine), errorStrategy); } /** @@ -75,10 +77,12 @@ public MessageProcessorRunnable( */ MessageProcessorRunnable( BlockingQueue> blockingQueue, - MessageProcessor messageProcessor + MessageProcessor messageProcessor, + IngestionErrorStrategy errorStrategy ) { this.blockingQueue = Objects.requireNonNull(blockingQueue); this.messageProcessor = messageProcessor; + this.errorStrategy = errorStrategy; } static class MessageProcessor { @@ -231,8 +235,15 @@ public void run() { Thread.currentThread().interrupt(); // Restore interrupt status } if (result != null) { - stats.inc(); - messageProcessor.process(result.getMessage(), result.getPointer()); + try { + stats.inc(); + messageProcessor.process(result.getMessage(), result.getPointer()); + } catch (Exception e) { + errorStrategy.handleError(e, IngestionErrorStrategy.ErrorStage.PROCESSING); + if (errorStrategy.shouldPauseIngestion(e, IngestionErrorStrategy.ErrorStage.PROCESSING)) { + Thread.currentThread().interrupt(); + } + } } } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java index 0afe67002517b..05037f33c3965 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java @@ -14,6 +14,8 @@ import java.util.HashMap; import java.util.Map; +import static 
org.opensearch.indices.pollingingest.IngestionErrorStrategy.ErrorStrategy.DROP; + public class IngestionSourceTests extends OpenSearchTestCase { private final IngestionSource.PointerInitReset pointerInitReset = new IngestionSource.PointerInitReset( @@ -24,52 +26,50 @@ public class IngestionSourceTests extends OpenSearchTestCase { public void testConstructorAndGetters() { Map params = new HashMap<>(); params.put("key", "value"); - IngestionSource source = new IngestionSource("type", pointerInitReset, params); + IngestionSource source = new IngestionSource("type", pointerInitReset, DROP, params); assertEquals("type", source.getType()); assertEquals(StreamPoller.ResetState.REWIND_BY_OFFSET, source.getPointerInitReset().getType()); assertEquals("1000", source.getPointerInitReset().getValue()); + assertEquals(DROP, source.getErrorStrategy()); assertEquals(params, source.params()); } public void testEquals() { Map params1 = new HashMap<>(); params1.put("key", "value"); - IngestionSource source1 = new IngestionSource("type", pointerInitReset, params1); + IngestionSource source1 = new IngestionSource("type", pointerInitReset, DROP, params1); Map params2 = new HashMap<>(); params2.put("key", "value"); - IngestionSource source2 = new IngestionSource("type", pointerInitReset, params2); - + IngestionSource source2 = new IngestionSource("type", pointerInitReset, DROP, params2); assertTrue(source1.equals(source2)); assertTrue(source2.equals(source1)); - IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, params1); + IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, DROP, params1); assertFalse(source1.equals(source3)); } public void testHashCode() { Map params1 = new HashMap<>(); params1.put("key", "value"); - IngestionSource source1 = new IngestionSource("type", pointerInitReset, params1); + IngestionSource source1 = new IngestionSource("type", pointerInitReset, DROP, params1); Map params2 = new HashMap<>(); params2.put("key", "value"); - IngestionSource source2 = new IngestionSource("type", pointerInitReset, params2); - + IngestionSource source2 = new IngestionSource("type", pointerInitReset, DROP, params2); assertEquals(source1.hashCode(), source2.hashCode()); - IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, params1); + IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, DROP, params1); assertNotEquals(source1.hashCode(), source3.hashCode()); } public void testToString() { Map params = new HashMap<>(); params.put("key", "value"); - IngestionSource source = new IngestionSource("type", pointerInitReset, params); - + IngestionSource source = new IngestionSource("type", pointerInitReset, DROP, params); String expected = - "IngestionSource{type='type',pointer_init_reset='PointerInitReset{type='REWIND_BY_OFFSET', value=1000}', params={key=value}}"; + "IngestionSource{type='type',pointer_init_reset='PointerInitReset{type='REWIND_BY_OFFSET', value=1000}',error_strategy='DROP', params={key=value}}"; assertEquals(expected, source.toString()); } } diff --git a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java index 1d81a22e94e9c..6233a65664d0b 100644 --- a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java +++ b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java @@ -83,6 +83,11 @@ public FakeIngestionShardPointer nextPointer() { return new 
FakeIngestionShardPointer(lastFetchedOffset + 1); } + @Override + public FakeIngestionShardPointer nextPointer(FakeIngestionShardPointer startPointer) { + return new FakeIngestionShardPointer(startPointer.offset + 1); + } + @Override public FakeIngestionShardPointer earliestPointer() { return new FakeIngestionShardPointer(0); diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java index c17b11791af09..0f0f90f392242 100644 --- a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java +++ b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java @@ -8,6 +8,7 @@ package org.opensearch.indices.pollingingest; +import org.opensearch.index.IngestionShardConsumer; import org.opensearch.index.IngestionShardPointer; import org.opensearch.index.engine.FakeIngestionSource; import org.opensearch.test.OpenSearchTestCase; @@ -16,19 +17,27 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class DefaultStreamPollerTests extends OpenSearchTestCase { private DefaultStreamPoller poller; @@ -38,6 +47,8 @@ public class DefaultStreamPollerTests extends OpenSearchTestCase { private List messages; private Set persistedPointers; private final int awaitTime = 300; + private final int sleepTime = 300; + private DropIngestionErrorStrategy errorStrategy; @Before public void setUp() throws Exception { @@ -48,7 +59,8 @@ public void setUp() throws Exception { messages.add("{\"_id\":\"2\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); fakeConsumer = new FakeIngestionSource.FakeIngestionConsumer(messages, 0); processor = mock(MessageProcessorRunnable.MessageProcessor.class); - processorRunnable = new MessageProcessorRunnable(new ArrayBlockingQueue<>(5), processor); + errorStrategy = new DropIngestionErrorStrategy("ingestion_source"); + processorRunnable = new MessageProcessorRunnable(new ArrayBlockingQueue<>(5), processor, errorStrategy); persistedPointers = new HashSet<>(); poller = new DefaultStreamPoller( new FakeIngestionSource.FakeIngestionShardPointer(0), @@ -56,7 +68,8 @@ public void setUp() throws Exception { fakeConsumer, processorRunnable, StreamPoller.ResetState.NONE, - "" + "", + errorStrategy ); } @@ -111,7 +124,8 @@ public void testSkipProcessed() throws InterruptedException { fakeConsumer, processorRunnable, StreamPoller.ResetState.NONE, - "" + "", + errorStrategy ); CountDownLatch latch = new CountDownLatch(2); @@ -147,7 +161,8 @@ public void testResetStateEarliest() throws InterruptedException { fakeConsumer, processorRunnable, 
StreamPoller.ResetState.EARLIEST, - "" + "", + errorStrategy ); CountDownLatch latch = new CountDownLatch(2); doAnswer(invocation -> { @@ -169,7 +184,8 @@ public void testResetStateLatest() throws InterruptedException { fakeConsumer, processorRunnable, StreamPoller.ResetState.LATEST, - "" + "", + errorStrategy ); poller.start(); @@ -187,7 +203,8 @@ public void testResetStateRewindByOffset() throws InterruptedException { fakeConsumer, processorRunnable, StreamPoller.ResetState.REWIND_BY_OFFSET, - "1" + "1", + errorStrategy ); CountDownLatch latch = new CountDownLatch(1); doAnswer(invocation -> { @@ -221,4 +238,112 @@ public void testStartClosedPoller() throws InterruptedException { assertEquals("poller is closed!", e.getMessage()); } } + + public void testDropErrorIngestionStrategy() throws TimeoutException, InterruptedException { + messages.add("{\"_id\":\"3\",\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8)); + messages.add("{\"_id\":\"4\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); + List< + IngestionShardConsumer.ReadResult< + FakeIngestionSource.FakeIngestionShardPointer, + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch1 = fakeConsumer.readNext( + fakeConsumer.earliestPointer(), + 2, + 100 + ); + List< + IngestionShardConsumer.ReadResult< + FakeIngestionSource.FakeIngestionShardPointer, + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch2 = fakeConsumer.readNext(fakeConsumer.nextPointer(), 2, 100); + IngestionShardConsumer mockConsumer = mock(IngestionShardConsumer.class); + when(mockConsumer.getShardId()).thenReturn(0); + when(mockConsumer.readNext(any(), anyLong(), anyInt())).thenThrow(new RuntimeException("message1 poll failed")) + .thenReturn(readResultsBatch1) + .thenThrow(new RuntimeException("message3 poll failed")) + .thenReturn(readResultsBatch2) + .thenReturn(Collections.emptyList()); + + IngestionErrorStrategy errorStrategy = spy(new DropIngestionErrorStrategy("ingestion_source")); + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(0), + persistedPointers, + mockConsumer, + processorRunnable, + StreamPoller.ResetState.NONE, + "", + errorStrategy + ); + poller.start(); + Thread.sleep(sleepTime); + + verify(errorStrategy, times(2)).handleError(any(), eq(IngestionErrorStrategy.ErrorStage.POLLING)); + verify(processor, times(4)).process(any(), any()); + } + + public void testBlockErrorIngestionStrategy() throws TimeoutException, InterruptedException { + messages.add("{\"_id\":\"3\",\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8)); + messages.add("{\"_id\":\"4\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); + List< + IngestionShardConsumer.ReadResult< + FakeIngestionSource.FakeIngestionShardPointer, + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch1 = fakeConsumer.readNext( + fakeConsumer.earliestPointer(), + 2, + 100 + ); + List< + IngestionShardConsumer.ReadResult< + FakeIngestionSource.FakeIngestionShardPointer, + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch2 = fakeConsumer.readNext(fakeConsumer.nextPointer(), 2, 100); + IngestionShardConsumer mockConsumer = mock(IngestionShardConsumer.class); + when(mockConsumer.getShardId()).thenReturn(0); + when(mockConsumer.readNext(any(), anyLong(), anyInt())).thenThrow(new RuntimeException("message1 poll failed")) + .thenReturn(readResultsBatch1) + .thenReturn(readResultsBatch2) + .thenReturn(Collections.emptyList()); 
+ + IngestionErrorStrategy errorStrategy = spy(new BlockIngestionErrorStrategy("ingestion_source")); + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(0), + persistedPointers, + mockConsumer, + processorRunnable, + StreamPoller.ResetState.NONE, + "", + errorStrategy + ); + poller.start(); + Thread.sleep(sleepTime); + + verify(errorStrategy, times(1)).handleError(any(), eq(IngestionErrorStrategy.ErrorStage.POLLING)); + verify(processor, never()).process(any(), any()); + assertEquals(DefaultStreamPoller.State.PAUSED, poller.getState()); + assertTrue(poller.isPaused()); + } + + public void testProcessingErrorWithBlockErrorIngestionStrategy() throws TimeoutException, InterruptedException { + messages.add("{\"_id\":\"3\",\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8)); + messages.add("{\"_id\":\"4\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); + + doThrow(new RuntimeException("Error processing update")).when(processor).process(any(), any()); + BlockIngestionErrorStrategy mockErrorStrategy = spy(new BlockIngestionErrorStrategy("ingestion_source")); + processorRunnable = new MessageProcessorRunnable(new ArrayBlockingQueue<>(5), processor, mockErrorStrategy); + + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(0), + persistedPointers, + fakeConsumer, + processorRunnable, + StreamPoller.ResetState.NONE, + "", + mockErrorStrategy + ); + poller.start(); + Thread.sleep(sleepTime); + + verify(mockErrorStrategy, times(1)).handleError(any(), eq(IngestionErrorStrategy.ErrorStage.PROCESSING)); + verify(processor, times(1)).process(any(), any()); + // poller will continue to poll if an error is encountered during message processing but will be blocked by + // the write to blockingQueue + assertEquals(DefaultStreamPoller.State.POLLING, poller.getState()); + } } From 09af5184cc097d36d10ea2b418b17d4d252cb5de Mon Sep 17 00:00:00 2001 From: Peter Zhu Date: Tue, 4 Mar 2025 19:19:37 -0500 Subject: [PATCH 048/550] Bump software.amazon.awssdk from 2.20.86 to 2.30.31 (#17396) Signed-off-by: Peter Zhu Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + gradle/libs.versions.toml | 3 +- plugins/crypto-kms/build.gradle | 3 + .../licenses/annotations-2.20.86.jar.sha1 | 1 - .../licenses/annotations-2.30.31.jar.sha1 | 1 + .../licenses/apache-client-2.20.86.jar.sha1 | 1 - .../licenses/apache-client-2.30.31.jar.sha1 | 1 + .../crypto-kms/licenses/auth-2.20.86.jar.sha1 | 1 - .../crypto-kms/licenses/auth-2.30.31.jar.sha1 | 1 + .../licenses/aws-core-2.20.86.jar.sha1 | 1 - .../licenses/aws-core-2.30.31.jar.sha1 | 1 + .../aws-json-protocol-2.20.86.jar.sha1 | 1 - .../aws-json-protocol-2.30.31.jar.sha1 | 1 + .../aws-query-protocol-2.20.86.jar.sha1 | 1 - .../aws-query-protocol-2.30.31.jar.sha1 | 1 + .../licenses/endpoints-spi-2.20.86.jar.sha1 | 1 - .../licenses/endpoints-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-spi-NOTICE.txt | 14 ++ .../licenses/http-client-spi-2.20.86.jar.sha1 | 1 - .../licenses/http-client-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/identity-spi-NOTICE.txt | 14 ++ .../licenses/json-utils-2.20.86.jar.sha1 | 1 - .../licenses/json-utils-2.30.31.jar.sha1 | 1 + .../crypto-kms/licenses/kms-2.20.86.jar.sha1 | 1 - 
.../crypto-kms/licenses/kms-2.30.31.jar.sha1 | 1 + .../licenses/metrics-spi-2.20.86.jar.sha1 | 1 - .../licenses/metrics-spi-2.30.31.jar.sha1 | 1 + .../licenses/profiles-2.20.86.jar.sha1 | 1 - .../licenses/profiles-2.30.31.jar.sha1 | 1 + .../licenses/protocol-core-2.20.86.jar.sha1 | 1 - .../licenses/protocol-core-2.30.31.jar.sha1 | 1 + .../licenses/regions-2.20.86.jar.sha1 | 1 - .../licenses/regions-2.30.31.jar.sha1 | 1 + .../licenses/retries-spi-2.30.31.jar.sha1 | 1 + .../licenses/retries-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/retries-spi-NOTICE.txt | 14 ++ .../licenses/sdk-core-2.20.86.jar.sha1 | 1 - .../licenses/sdk-core-2.30.31.jar.sha1 | 1 + .../third-party-jackson-core-2.20.86.jar.sha1 | 1 - .../third-party-jackson-core-2.30.31.jar.sha1 | 1 + .../licenses/utils-2.20.86.jar.sha1 | 1 - .../licenses/utils-2.30.31.jar.sha1 | 1 + .../crypto/kms/KmsServiceTests.java | 17 +- plugins/discovery-ec2/build.gradle | 13 +- .../licenses/annotations-2.20.86.jar.sha1 | 1 - .../licenses/annotations-2.30.31.jar.sha1 | 1 + .../licenses/apache-client-2.20.86.jar.sha1 | 1 - .../licenses/apache-client-2.30.31.jar.sha1 | 1 + .../licenses/auth-2.20.86.jar.sha1 | 1 - .../licenses/auth-2.30.31.jar.sha1 | 1 + .../licenses/aws-core-2.20.86.jar.sha1 | 1 - .../licenses/aws-core-2.30.31.jar.sha1 | 1 + .../licenses/aws-crt-0.35.0.jar.sha1 | 1 + .../licenses/aws-crt-LICENSE.txt | 202 ++++++++++++++++++ .../discovery-ec2/licenses/aws-crt-NOTICE.txt | 14 ++ .../aws-json-protocol-2.20.86.jar.sha1 | 1 - .../aws-json-protocol-2.30.31.jar.sha1 | 1 + .../aws-query-protocol-2.20.86.jar.sha1 | 1 - .../aws-query-protocol-2.30.31.jar.sha1 | 1 + .../licenses/checksums-2.30.31.jar.sha1 | 1 + .../licenses/checksums-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/checksums-NOTICE.txt | 14 ++ .../licenses/checksums-spi-2.30.31.jar.sha1 | 1 + .../licenses/checksums-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/checksums-spi-NOTICE.txt | 14 ++ .../licenses/ec2-2.20.86.jar.sha1 | 1 - .../licenses/ec2-2.30.31.jar.sha1 | 1 + .../licenses/endpoints-spi-2.20.86.jar.sha1 | 1 - .../licenses/endpoints-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-NOTICE.txt | 14 ++ .../licenses/http-auth-aws-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-aws-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-aws-NOTICE.txt | 14 ++ .../licenses/http-auth-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-spi-NOTICE.txt | 14 ++ .../licenses/http-client-spi-2.20.86.jar.sha1 | 1 - .../licenses/http-client-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/identity-spi-NOTICE.txt | 14 ++ .../licenses/json-utils-2.20.86.jar.sha1 | 1 - .../licenses/json-utils-2.30.31.jar.sha1 | 1 + .../licenses/metrics-spi-2.20.86.jar.sha1 | 1 - .../licenses/metrics-spi-2.30.31.jar.sha1 | 1 + .../licenses/profiles-2.20.86.jar.sha1 | 1 - .../licenses/profiles-2.30.31.jar.sha1 | 1 + .../licenses/protocol-core-2.20.86.jar.sha1 | 1 - .../licenses/protocol-core-2.30.31.jar.sha1 | 1 + .../licenses/regions-2.20.86.jar.sha1 | 1 - .../licenses/regions-2.30.31.jar.sha1 | 1 + .../licenses/retries-2.30.31.jar.sha1 | 1 + .../licenses/retries-LICENSE.txt | 202 ++++++++++++++++++ .../discovery-ec2/licenses/retries-NOTICE.txt | 14 ++ .../licenses/retries-spi-2.30.31.jar.sha1 | 1 + 
.../licenses/retries-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/retries-spi-NOTICE.txt | 14 ++ .../licenses/sdk-core-2.20.86.jar.sha1 | 1 - .../licenses/sdk-core-2.30.31.jar.sha1 | 1 + .../third-party-jackson-core-2.20.86.jar.sha1 | 1 - .../third-party-jackson-core-2.30.31.jar.sha1 | 1 + .../licenses/utils-2.20.86.jar.sha1 | 1 - .../licenses/utils-2.30.31.jar.sha1 | 1 + .../discovery/ec2/AwsEc2ServiceImplTests.java | 2 +- .../ec2/Ec2DiscoveryPluginTests.java | 15 +- plugins/repository-s3/build.gradle | 38 ++-- .../licenses/annotations-2.20.86.jar.sha1 | 1 - .../licenses/annotations-2.30.31.jar.sha1 | 1 + .../licenses/apache-client-2.20.86.jar.sha1 | 1 - .../licenses/apache-client-2.30.31.jar.sha1 | 1 + .../licenses/auth-2.20.86.jar.sha1 | 1 - .../licenses/auth-2.30.31.jar.sha1 | 1 + .../licenses/aws-core-2.20.86.jar.sha1 | 1 - .../licenses/aws-core-2.30.31.jar.sha1 | 1 + .../licenses/aws-crt-0.35.0.jar.sha1 | 1 + .../licenses/aws-crt-LICENSE.txt | 202 ++++++++++++++++++ .../repository-s3/licenses/aws-crt-NOTICE.txt | 14 ++ .../aws-json-protocol-2.20.86.jar.sha1 | 1 - .../aws-json-protocol-2.30.31.jar.sha1 | 1 + .../aws-query-protocol-2.20.86.jar.sha1 | 1 - .../aws-query-protocol-2.30.31.jar.sha1 | 1 + .../aws-xml-protocol-2.20.86.jar.sha1 | 1 - .../aws-xml-protocol-2.30.31.jar.sha1 | 1 + .../licenses/checksums-2.30.31.jar.sha1 | 1 + .../licenses/checksums-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/checksums-NOTICE.txt | 14 ++ .../licenses/checksums-spi-2.30.31.jar.sha1 | 1 + .../licenses/checksums-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/checksums-spi-NOTICE.txt | 14 ++ .../licenses/endpoints-spi-2.20.86.jar.sha1 | 1 - .../licenses/endpoints-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-NOTICE.txt | 14 ++ .../licenses/http-auth-aws-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-aws-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-aws-NOTICE.txt | 14 ++ .../licenses/http-auth-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/http-auth-spi-NOTICE.txt | 14 ++ .../licenses/http-client-spi-2.20.86.jar.sha1 | 1 - .../licenses/http-client-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/identity-spi-NOTICE.txt | 14 ++ .../licenses/json-utils-2.20.86.jar.sha1 | 1 - .../licenses/json-utils-2.30.31.jar.sha1 | 1 + .../licenses/metrics-spi-2.20.86.jar.sha1 | 1 - .../licenses/metrics-spi-2.30.31.jar.sha1 | 1 + .../netty-nio-client-2.20.86.jar.sha1 | 1 - .../netty-nio-client-2.30.31.jar.sha1 | 1 + .../licenses/profiles-2.20.86.jar.sha1 | 1 - .../licenses/profiles-2.30.31.jar.sha1 | 1 + .../licenses/protocol-core-2.20.86.jar.sha1 | 1 - .../licenses/protocol-core-2.30.31.jar.sha1 | 1 + .../licenses/regions-2.20.86.jar.sha1 | 1 - .../licenses/regions-2.30.31.jar.sha1 | 1 + .../licenses/retries-2.30.31.jar.sha1 | 1 + .../licenses/retries-LICENSE.txt | 202 ++++++++++++++++++ .../repository-s3/licenses/retries-NOTICE.txt | 14 ++ .../licenses/retries-spi-2.30.31.jar.sha1 | 1 + .../licenses/retries-spi-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/retries-spi-NOTICE.txt | 14 ++ .../licenses/s3-2.20.86.jar.sha1 | 1 - .../licenses/s3-2.30.31.jar.sha1 | 1 + .../licenses/sdk-core-2.20.86.jar.sha1 | 1 - .../licenses/sdk-core-2.30.31.jar.sha1 | 1 + .../licenses/signer-2.20.86.jar.sha1 | 1 - 
.../licenses/signer-2.30.31.jar.sha1 | 1 + .../licenses/sts-2.20.86.jar.sha1 | 1 - .../licenses/sts-2.30.31.jar.sha1 | 1 + .../third-party-jackson-core-2.20.86.jar.sha1 | 1 - .../third-party-jackson-core-2.30.31.jar.sha1 | 1 + .../licenses/utils-2.20.86.jar.sha1 | 1 - .../licenses/utils-2.30.31.jar.sha1 | 1 + 181 files changed, 4650 insertions(+), 109 deletions(-) delete mode 100644 plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/annotations-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/apache-client-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/auth-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/aws-core-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/aws-json-protocol-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/aws-query-protocol-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/endpoints-spi-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/http-auth-spi-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/http-auth-spi-LICENSE.txt create mode 100644 plugins/crypto-kms/licenses/http-auth-spi-NOTICE.txt delete mode 100644 plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/http-client-spi-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/identity-spi-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/identity-spi-LICENSE.txt create mode 100644 plugins/crypto-kms/licenses/identity-spi-NOTICE.txt delete mode 100644 plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/json-utils-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/kms-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/metrics-spi-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/profiles-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/protocol-core-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/regions-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/retries-spi-2.30.31.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/retries-spi-LICENSE.txt create mode 100644 plugins/crypto-kms/licenses/retries-spi-NOTICE.txt delete mode 100644 plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/sdk-core-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/third-party-jackson-core-2.30.31.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 create mode 
100644 plugins/crypto-kms/licenses/utils-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/annotations-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/apache-client-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/auth-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-core-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-crt-0.35.0.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-crt-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/aws-crt-NOTICE.txt delete mode 100644 plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-json-protocol-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-query-protocol-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/checksums-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/checksums-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/checksums-NOTICE.txt create mode 100644 plugins/discovery-ec2/licenses/checksums-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/checksums-spi-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/checksums-spi-NOTICE.txt delete mode 100644 plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/ec2-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/endpoints-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/http-auth-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/http-auth-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/http-auth-NOTICE.txt create mode 100644 plugins/discovery-ec2/licenses/http-auth-aws-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/http-auth-aws-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/http-auth-aws-NOTICE.txt create mode 100644 plugins/discovery-ec2/licenses/http-auth-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/http-auth-spi-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/http-auth-spi-NOTICE.txt delete mode 100644 plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/http-client-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/identity-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/identity-spi-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/identity-spi-NOTICE.txt delete mode 100644 plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/json-utils-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/metrics-spi-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/profiles-2.30.31.jar.sha1 delete mode 100644 
plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/protocol-core-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/regions-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/retries-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/retries-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/retries-NOTICE.txt create mode 100644 plugins/discovery-ec2/licenses/retries-spi-2.30.31.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/retries-spi-LICENSE.txt create mode 100644 plugins/discovery-ec2/licenses/retries-spi-NOTICE.txt delete mode 100644 plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/sdk-core-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/third-party-jackson-core-2.30.31.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/utils-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/annotations-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/apache-client-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/auth-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-core-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-crt-0.35.0.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-crt-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/aws-crt-NOTICE.txt delete mode 100644 plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-json-protocol-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-query-protocol-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-xml-protocol-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/checksums-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/checksums-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/checksums-NOTICE.txt create mode 100644 plugins/repository-s3/licenses/checksums-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/checksums-spi-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/checksums-spi-NOTICE.txt delete mode 100644 plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/endpoints-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/http-auth-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/http-auth-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/http-auth-NOTICE.txt create mode 100644 plugins/repository-s3/licenses/http-auth-aws-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/http-auth-aws-LICENSE.txt create 
mode 100644 plugins/repository-s3/licenses/http-auth-aws-NOTICE.txt create mode 100644 plugins/repository-s3/licenses/http-auth-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/http-auth-spi-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/http-auth-spi-NOTICE.txt delete mode 100644 plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/http-client-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/identity-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/identity-spi-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/identity-spi-NOTICE.txt delete mode 100644 plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/json-utils-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/metrics-spi-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-nio-client-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/profiles-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/protocol-core-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/regions-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/retries-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/retries-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/retries-NOTICE.txt create mode 100644 plugins/repository-s3/licenses/retries-spi-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/retries-spi-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/retries-spi-NOTICE.txt delete mode 100644 plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/s3-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/sdk-core-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/signer-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/sts-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/third-party-jackson-core-2.30.31.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 create mode 100644 plugins/repository-s3/licenses/utils-2.30.31.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 45fd4813e72da..091e832e01cd1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) - Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) - Bump `com.netflix.nebula.ospackage-base` from 11.10.1 to 11.11.1 
([#17374](https://github.com/opensearch-project/OpenSearch/pull/17374)) +- Bump `software.amazon.awssdk` from 2.20.86 to 2.30.31 ([17396](https://github.com/opensearch-project/OpenSearch/pull/17396)) ### Changed - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index abdd87394b35c..90518ca71ec53 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -52,7 +52,8 @@ commonslang = "3.14.0" commonscompress = "1.26.1" commonsio = "2.16.0" # plugin dependencies -aws = "2.20.86" +aws = "2.30.31" +awscrt = "0.35.0" reactivestreams = "1.0.4" # when updating this version, you need to ensure compatibility with: diff --git a/plugins/crypto-kms/build.gradle b/plugins/crypto-kms/build.gradle index fa63a4a7153d3..d66c731dc16af 100644 --- a/plugins/crypto-kms/build.gradle +++ b/plugins/crypto-kms/build.gradle @@ -30,10 +30,13 @@ dependencies { api "software.amazon.awssdk:aws-core:${versions.aws}" api "software.amazon.awssdk:utils:${versions.aws}" api "software.amazon.awssdk:auth:${versions.aws}" + api "software.amazon.awssdk:identity-spi:${versions.aws}" api "software.amazon.awssdk:kms:${versions.aws}" + api "software.amazon.awssdk:http-auth-spi:${versions.aws}" api "software.amazon.awssdk:http-client-spi:${versions.aws}" api "software.amazon.awssdk:apache-client:${versions.aws}" api "software.amazon.awssdk:regions:${versions.aws}" + api "software.amazon.awssdk:retries-spi:${versions.aws}" api "software.amazon.awssdk:profiles:${versions.aws}" api "software.amazon.awssdk:endpoints-spi:${versions.aws}" api "software.amazon.awssdk:annotations:${versions.aws}" diff --git a/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 deleted file mode 100644 index 8d30ad649916b..0000000000000 --- a/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/annotations-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/annotations-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d45f8758c9405 --- /dev/null +++ b/plugins/crypto-kms/licenses/annotations-2.30.31.jar.sha1 @@ -0,0 +1 @@ +c5acc1da9567290302d80ffa1633785afa4ce630 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 deleted file mode 100644 index e7ae36581925c..0000000000000 --- a/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/apache-client-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/apache-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..97331cbda2c1b --- /dev/null +++ b/plugins/crypto-kms/licenses/apache-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d1c602dba702782a0afec0a08c919322693a3bf8 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 deleted file mode 100644 index e4c1b29cea894..0000000000000 --- a/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/auth-2.30.31.jar.sha1 
b/plugins/crypto-kms/licenses/auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..c1e199ca02fc8 --- /dev/null +++ b/plugins/crypto-kms/licenses/auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +8887962b04ce5f1a9f46d44acd806949b17082da \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 deleted file mode 100644 index d42a15c4da413..0000000000000 --- a/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-core-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/aws-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..16050fd1d8c6d --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5016fadbd7146171b4afe09eb0675b710b0f2d12 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 deleted file mode 100644 index ee08d240fbfba..0000000000000 --- a/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/aws-json-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..bfc742d8687d1 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-json-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4600659276f84e114c1fabeb1478911c581a7739 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 deleted file mode 100644 index 9b19f570d56fb..0000000000000 --- a/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/aws-query-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9508295147c96 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-query-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +61596c0cb577a4a6c438a5a7ee0391d2d825b3fe \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 deleted file mode 100644 index 16f9db1fd6327..0000000000000 --- a/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/endpoints-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/endpoints-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4dbc884c3da6f --- /dev/null +++ b/plugins/crypto-kms/licenses/endpoints-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +0734f4b9c68f19201896dd47639035b4e0a7964d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/http-auth-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/http-auth-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..491ffe4dd0584 --- /dev/null +++ b/plugins/crypto-kms/licenses/http-auth-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +513519f79635441d5205fc31d56c2e0d5826d27f \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/http-auth-spi-LICENSE.txt 
b/plugins/crypto-kms/licenses/http-auth-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/http-auth-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/http-auth-spi-NOTICE.txt b/plugins/crypto-kms/licenses/http-auth-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/http-auth-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 deleted file mode 100644 index 0662e15b1f3e6..0000000000000 --- a/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/http-client-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/http-client-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d86fa139f535c --- /dev/null +++ b/plugins/crypto-kms/licenses/http-client-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5fa894c333793b7481aa03aa87512b20e11b057d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/identity-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/identity-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9eeab9ad13dba --- /dev/null +++ b/plugins/crypto-kms/licenses/identity-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +46da74ac074b176c25fba07c6541737422622c1d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/identity-spi-LICENSE.txt b/plugins/crypto-kms/licenses/identity-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/identity-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/identity-spi-NOTICE.txt b/plugins/crypto-kms/licenses/identity-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/identity-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 deleted file mode 100644 index 7011f8c3e6c78..0000000000000 --- a/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/json-utils-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/json-utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..5019f6d48fa0a --- /dev/null +++ b/plugins/crypto-kms/licenses/json-utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7f0ef4b49299df2fd39f92113d94524729c61032 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 deleted file mode 100644 index 32c4e9f432898..0000000000000 --- a/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a81c2f14acaa7b9dcdc80c715d6e44d815a818a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/kms-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/kms-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..becd3d624ef17 --- /dev/null +++ b/plugins/crypto-kms/licenses/kms-2.30.31.jar.sha1 @@ -0,0 +1 @@ +0bb8a87a83edf1eb0c4dddb2afb1158ac858626d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 deleted file mode 100644 index bbd88bb9e1b0c..0000000000000 --- a/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/metrics-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/metrics-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..69ab3ec6f79ff --- /dev/null +++ b/plugins/crypto-kms/licenses/metrics-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +57a979cbc99d0bf4113d96aaf4f453303a015966 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 deleted file mode 100644 index 425ce9b92f9f2..0000000000000 --- a/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/profiles-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/profiles-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..6d4d2a1ac8d65 --- /dev/null +++ b/plugins/crypto-kms/licenses/profiles-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d6d2d5788695972140dfe8b012ea7ccd97b82eef \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 deleted file mode 100644 index 8de58699d8d82..0000000000000 
--- a/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/protocol-core-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/protocol-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..caae2a4302976 --- /dev/null +++ b/plugins/crypto-kms/licenses/protocol-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +ee17b25525aee497b6d520c8e499f39de7204fbc \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 deleted file mode 100644 index 266bc76ad6f77..0000000000000 --- a/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/regions-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/regions-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..8e9876686a144 --- /dev/null +++ b/plugins/crypto-kms/licenses/regions-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7ce1df66496dcf9b124edb78ab9675e1e7d5c427 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/retries-spi-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/retries-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..854e3d7e4aebf --- /dev/null +++ b/plugins/crypto-kms/licenses/retries-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4d9166189594243f88045fbf0c871a81e3914c0b \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/retries-spi-LICENSE.txt b/plugins/crypto-kms/licenses/retries-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/retries-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/retries-spi-NOTICE.txt b/plugins/crypto-kms/licenses/retries-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/retries-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 deleted file mode 100644 index 9eca40e6b9a9a..0000000000000 --- a/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/sdk-core-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/sdk-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..ee3d7e3bff68d --- /dev/null +++ b/plugins/crypto-kms/licenses/sdk-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b95c07d4796105c2e61c4c6ab60e3189886b2787 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 deleted file mode 100644 index c9c3d4dc53505..0000000000000 --- a/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/third-party-jackson-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..a07a8eda62447 --- /dev/null +++ b/plugins/crypto-kms/licenses/third-party-jackson-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +100d8022939bd59cd7d2461bd4fb0fd9fa028499 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 deleted file mode 100644 index b91a3b3047570..0000000000000 --- a/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/utils-2.30.31.jar.sha1 b/plugins/crypto-kms/licenses/utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..184ff1cc5f9ce --- /dev/null +++ b/plugins/crypto-kms/licenses/utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +3340adacb87ff28f90a039d57c81311b296db89e \ No newline at end of file diff --git a/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java index 1424cce473592..8d63d1c0eccd7 100644 --- a/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java +++ b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java @@ -31,7 +31,7 @@ public void testAWSDefaultConfiguration() { KmsClientSettings.getClientSettings(Settings.EMPTY) ); - assertNull(proxyConfiguration.scheme()); + assertEquals("http", proxyConfiguration.scheme()); assertNull(proxyConfiguration.host()); assertEquals(proxyConfiguration.port(), 0); assertNull(proxyConfiguration.username()); @@ -131,14 +131,11 @@ public void testClientSettingsReInit() { assertTrue(credentials instanceof AwsBasicCredentials); } - assertEquals( - mockKmsClientTest.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-1"); assertEquals(mockKmsClientTest.proxyConfiguration.port(), 881); assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_1"); assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_1"); + 
assertFalse(mockKmsClientTest.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } // reload secure settings2 plugin.reload(settings2); @@ -155,14 +152,11 @@ public void testClientSettingsReInit() { assertTrue(credentials instanceof AwsBasicCredentials); } - assertEquals( - mockKmsClientTest.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-1"); assertEquals(mockKmsClientTest.proxyConfiguration.port(), 881); assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_1"); assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_1"); + assertFalse(mockKmsClientTest.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } } try (AmazonKmsClientReference clientReference = plugin.kmsService.client(cryptoMetadata)) { @@ -179,14 +173,11 @@ public void testClientSettingsReInit() { assertTrue(credentials instanceof AwsBasicCredentials); } - assertEquals( - mockKmsClientTest.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-2:882, username=proxy_username_2, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-2"); assertEquals(mockKmsClientTest.proxyConfiguration.port(), 882); assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_2"); assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_2"); + assertFalse(mockKmsClientTest.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } } } diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 8d615e0bf8d9d..7a7eb8da24fb6 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -43,6 +43,15 @@ dependencies { api "software.amazon.awssdk:aws-core:${versions.aws}" api "software.amazon.awssdk:utils:${versions.aws}" api "software.amazon.awssdk:auth:${versions.aws}" + api "software.amazon.awssdk:identity-spi:${versions.aws}" + api "software.amazon.awssdk:checksums:${versions.aws}" + api "software.amazon.awssdk:checksums-spi:${versions.aws}" + api "software.amazon.awssdk.crt:aws-crt:${versions.awscrt}" + api "software.amazon.awssdk:http-auth:${versions.aws}" + api "software.amazon.awssdk:http-auth-aws:${versions.aws}" + api "software.amazon.awssdk:http-auth-spi:${versions.aws}" + api "software.amazon.awssdk:retries:${versions.aws}" + api "software.amazon.awssdk:retries-spi:${versions.aws}" api "software.amazon.awssdk:ec2:${versions.aws}" api "software.amazon.awssdk:http-client-spi:${versions.aws}" api "software.amazon.awssdk:apache-client:${versions.aws}" @@ -158,6 +167,8 @@ tasks.named("thirdPartyAudit").configure { 'org.slf4j.impl.StaticMarkerBinder', 'software.amazon.eventstream.HeaderValue', 'software.amazon.eventstream.Message', - 'software.amazon.eventstream.MessageDecoder' + 'software.amazon.eventstream.MessageDecoder', + 'org.graalvm.nativeimage.hosted.Feature', + 'org.graalvm.nativeimage.hosted.Feature$AfterImageWriteAccess' ) } diff --git a/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 deleted file mode 100644 index 8d30ad649916b..0000000000000 --- a/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git 
a/plugins/discovery-ec2/licenses/annotations-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/annotations-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d45f8758c9405 --- /dev/null +++ b/plugins/discovery-ec2/licenses/annotations-2.30.31.jar.sha1 @@ -0,0 +1 @@ +c5acc1da9567290302d80ffa1633785afa4ce630 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 deleted file mode 100644 index e7ae36581925c..0000000000000 --- a/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/apache-client-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/apache-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..97331cbda2c1b --- /dev/null +++ b/plugins/discovery-ec2/licenses/apache-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d1c602dba702782a0afec0a08c919322693a3bf8 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 deleted file mode 100644 index e4c1b29cea894..0000000000000 --- a/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/auth-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..c1e199ca02fc8 --- /dev/null +++ b/plugins/discovery-ec2/licenses/auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +8887962b04ce5f1a9f46d44acd806949b17082da \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 deleted file mode 100644 index d42a15c4da413..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-core-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/aws-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..16050fd1d8c6d --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5016fadbd7146171b4afe09eb0675b710b0f2d12 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-crt-0.35.0.jar.sha1 b/plugins/discovery-ec2/licenses/aws-crt-0.35.0.jar.sha1 new file mode 100644 index 0000000000000..1097f5bb4d814 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-crt-0.35.0.jar.sha1 @@ -0,0 +1 @@ +33041403e1a9dd94f40330206eda5ffc22ee185c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-crt-LICENSE.txt b/plugins/discovery-ec2/licenses/aws-crt-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-crt-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/discovery-ec2/licenses/aws-crt-NOTICE.txt b/plugins/discovery-ec2/licenses/aws-crt-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-crt-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 deleted file mode 100644 index ee08d240fbfba..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-json-protocol-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/aws-json-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..bfc742d8687d1 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-json-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4600659276f84e114c1fabeb1478911c581a7739 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 deleted file mode 100644 index 9b19f570d56fb..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-query-protocol-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/aws-query-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9508295147c96 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-query-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +61596c0cb577a4a6c438a5a7ee0391d2d825b3fe \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/checksums-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/checksums-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4447b86f6e872 --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-2.30.31.jar.sha1 @@ -0,0 +1 @@ +6d00287bc0ceb013dd5c74f1c4eb296ae61b34d4 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/checksums-LICENSE.txt b/plugins/discovery-ec2/licenses/checksums-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/discovery-ec2/licenses/checksums-NOTICE.txt b/plugins/discovery-ec2/licenses/checksums-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/checksums-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/checksums-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..078cab150c5ad --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b5a5b0a39403acf41c21fd16cd11c7c8d887601b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/checksums-spi-LICENSE.txt b/plugins/discovery-ec2/licenses/checksums-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/checksums-spi-NOTICE.txt b/plugins/discovery-ec2/licenses/checksums-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/checksums-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 deleted file mode 100644 index 18c43cfc7516d..0000000000000 --- a/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3522a0829622a9c80152e6e2528bb79166f0b709 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/ec2-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/ec2-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..e5982e9b99aa7 --- /dev/null +++ b/plugins/discovery-ec2/licenses/ec2-2.30.31.jar.sha1 @@ -0,0 +1 @@ +e1df5c01dc20de548b572d4bcfd75bba360411f2 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 deleted file mode 100644 index 16f9db1fd6327..0000000000000 --- a/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/endpoints-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/endpoints-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4dbc884c3da6f --- /dev/null +++ b/plugins/discovery-ec2/licenses/endpoints-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +0734f4b9c68f19201896dd47639035b4e0a7964d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-auth-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/http-auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..79893fb4fbf58 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b7baeb158b0af0e400d89a32595c9127db2bbb6e \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-auth-LICENSE.txt b/plugins/discovery-ec2/licenses/http-auth-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/http-auth-NOTICE.txt b/plugins/discovery-ec2/licenses/http-auth-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/discovery-ec2/licenses/http-auth-aws-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/http-auth-aws-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d190c6ca52e98 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-aws-2.30.31.jar.sha1 @@ -0,0 +1 @@ +f2a7d383158746c82b0f41b021e0da23a2597b35 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-auth-aws-LICENSE.txt b/plugins/discovery-ec2/licenses/http-auth-aws-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-aws-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/http-auth-aws-NOTICE.txt b/plugins/discovery-ec2/licenses/http-auth-aws-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-aws-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/http-auth-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/http-auth-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..491ffe4dd0584 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +513519f79635441d5205fc31d56c2e0d5826d27f \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-auth-spi-LICENSE.txt b/plugins/discovery-ec2/licenses/http-auth-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/discovery-ec2/licenses/http-auth-spi-NOTICE.txt b/plugins/discovery-ec2/licenses/http-auth-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-auth-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 deleted file mode 100644 index 0662e15b1f3e6..0000000000000 --- a/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-client-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/http-client-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d86fa139f535c --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-client-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5fa894c333793b7481aa03aa87512b20e11b057d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/identity-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/identity-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9eeab9ad13dba --- /dev/null +++ b/plugins/discovery-ec2/licenses/identity-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +46da74ac074b176c25fba07c6541737422622c1d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/identity-spi-LICENSE.txt b/plugins/discovery-ec2/licenses/identity-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/identity-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/identity-spi-NOTICE.txt b/plugins/discovery-ec2/licenses/identity-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/identity-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 deleted file mode 100644 index 7011f8c3e6c78..0000000000000 --- a/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/json-utils-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/json-utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..5019f6d48fa0a --- /dev/null +++ b/plugins/discovery-ec2/licenses/json-utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7f0ef4b49299df2fd39f92113d94524729c61032 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 deleted file mode 100644 index bbd88bb9e1b0c..0000000000000 --- a/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/metrics-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/metrics-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..69ab3ec6f79ff --- /dev/null +++ b/plugins/discovery-ec2/licenses/metrics-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +57a979cbc99d0bf4113d96aaf4f453303a015966 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 deleted file mode 100644 index 425ce9b92f9f2..0000000000000 --- a/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/profiles-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/profiles-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..6d4d2a1ac8d65 --- /dev/null +++ b/plugins/discovery-ec2/licenses/profiles-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d6d2d5788695972140dfe8b012ea7ccd97b82eef \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 deleted file mode 100644 index 8de58699d8d82..0000000000000 --- a/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/protocol-core-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/protocol-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..caae2a4302976 --- /dev/null +++ b/plugins/discovery-ec2/licenses/protocol-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +ee17b25525aee497b6d520c8e499f39de7204fbc \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 deleted file mode 100644 index 266bc76ad6f77..0000000000000 --- a/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/regions-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/regions-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..8e9876686a144 --- /dev/null +++ b/plugins/discovery-ec2/licenses/regions-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7ce1df66496dcf9b124edb78ab9675e1e7d5c427 \ No newline at end of file diff --git 
a/plugins/discovery-ec2/licenses/retries-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/retries-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..98b46e3439ac7 --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b490f67c9d3f000ae40928d9aa3c9debceac0966 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/retries-LICENSE.txt b/plugins/discovery-ec2/licenses/retries-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-ec2/licenses/retries-NOTICE.txt b/plugins/discovery-ec2/licenses/retries-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/retries-spi-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/retries-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..854e3d7e4aebf --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4d9166189594243f88045fbf0c871a81e3914c0b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/retries-spi-LICENSE.txt b/plugins/discovery-ec2/licenses/retries-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/discovery-ec2/licenses/retries-spi-NOTICE.txt b/plugins/discovery-ec2/licenses/retries-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/discovery-ec2/licenses/retries-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 deleted file mode 100644 index 9eca40e6b9a9a..0000000000000 --- a/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/sdk-core-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/sdk-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..ee3d7e3bff68d --- /dev/null +++ b/plugins/discovery-ec2/licenses/sdk-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b95c07d4796105c2e61c4c6ab60e3189886b2787 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 deleted file mode 100644 index c9c3d4dc53505..0000000000000 --- a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..a07a8eda62447 --- /dev/null +++ b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +100d8022939bd59cd7d2461bd4fb0fd9fa028499 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 deleted file mode 100644 index b91a3b3047570..0000000000000 --- a/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/utils-2.30.31.jar.sha1 b/plugins/discovery-ec2/licenses/utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..184ff1cc5f9ce --- /dev/null +++ b/plugins/discovery-ec2/licenses/utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +3340adacb87ff28f90a039d57c81311b296db89e \ No newline at end of file diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java index 3164abe456515..d5f1a5e2d0e45 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java @@ -131,7 +131,7 @@ public void testAWSDefaultConfiguration() { Ec2ClientSettings.getClientSettings(Settings.EMPTY) ); - assertNull(proxyConfiguration.scheme()); + assertEquals("http", proxyConfiguration.scheme()); assertNull(proxyConfiguration.host()); assertThat(proxyConfiguration.port(), 
is(0)); assertNull(proxyConfiguration.username()); diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java index bde508a0afe96..40c7ba4fc53d7 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -180,14 +180,11 @@ public void testClientSettingsReInit() throws IOException { assertThat(credentials, instanceOf(AwsBasicCredentials.class)); } - assertEquals( - mockEc2Client.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockEc2Client.proxyConfiguration.host(), "proxy-host-1"); assertEquals(mockEc2Client.proxyConfiguration.port(), 881); assertEquals(mockEc2Client.proxyConfiguration.username(), "proxy_username_1"); assertEquals(mockEc2Client.proxyConfiguration.password(), "proxy_password_1"); + assertFalse(mockEc2Client.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } // reload secure settings2 plugin.reload(settings2); @@ -204,14 +201,11 @@ public void testClientSettingsReInit() throws IOException { assertThat(credentials, instanceOf(AwsBasicCredentials.class)); } - assertEquals( - mockEc2Client.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockEc2Client.proxyConfiguration.host(), "proxy-host-1"); assertEquals(mockEc2Client.proxyConfiguration.port(), 881); assertEquals(mockEc2Client.proxyConfiguration.username(), "proxy_username_1"); assertEquals(mockEc2Client.proxyConfiguration.password(), "proxy_password_1"); + assertFalse(mockEc2Client.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } } try (AmazonEc2ClientReference clientReference = plugin.ec2Service.client()) { @@ -228,14 +222,11 @@ public void testClientSettingsReInit() throws IOException { assertThat(credentials, instanceOf(AwsBasicCredentials.class)); } - assertEquals( - mockEc2Client.proxyConfiguration.toString(), - "ProxyConfiguration(endpoint=https://proxy-host-2:882, username=proxy_username_2, preemptiveBasicAuthenticationEnabled=false)" - ); assertEquals(mockEc2Client.proxyConfiguration.host(), "proxy-host-2"); assertEquals(mockEc2Client.proxyConfiguration.port(), 882); assertEquals(mockEc2Client.proxyConfiguration.username(), "proxy_username_2"); assertEquals(mockEc2Client.proxyConfiguration.password(), "proxy_password_2"); + assertFalse(mockEc2Client.proxyConfiguration.preemptiveBasicAuthenticationEnabled()); } } } diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 6e84edddcc252..de9c5420ba034 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -51,6 +51,15 @@ dependencies { api "software.amazon.awssdk:annotations:${versions.aws}" api "software.amazon.awssdk:aws-core:${versions.aws}" api "software.amazon.awssdk:auth:${versions.aws}" + api "software.amazon.awssdk:identity-spi:${versions.aws}" + api "software.amazon.awssdk:checksums:${versions.aws}" + api "software.amazon.awssdk:checksums-spi:${versions.aws}" + api "software.amazon.awssdk.crt:aws-crt:${versions.awscrt}" + api "software.amazon.awssdk:http-auth:${versions.aws}" + api 
"software.amazon.awssdk:http-auth-aws:${versions.aws}" + api "software.amazon.awssdk:http-auth-spi:${versions.aws}" + api "software.amazon.awssdk:retries:${versions.aws}" + api "software.amazon.awssdk:retries-spi:${versions.aws}" api "software.amazon.awssdk:endpoints-spi:${versions.aws}" api "software.amazon.awssdk:http-client-spi:${versions.aws}" api "software.amazon.awssdk:apache-client:${versions.aws}" @@ -517,34 +526,11 @@ thirdPartyAudit { 'reactor.blockhound.BlockHound$Builder', 'reactor.blockhound.integration.BlockHoundIntegration', + 'org.graalvm.nativeimage.hosted.Feature', + 'org.graalvm.nativeimage.hosted.Feature$AfterImageWriteAccess', + 'software.amazon.awssdk.arns.Arn', 'software.amazon.awssdk.arns.ArnResource', - 'software.amazon.awssdk.crt.CRT', - 'software.amazon.awssdk.crt.auth.credentials.Credentials', - 'software.amazon.awssdk.crt.auth.credentials.CredentialsProvider', - 'software.amazon.awssdk.crt.auth.credentials.DelegateCredentialsProvider$DelegateCredentialsProviderBuilder', - 'software.amazon.awssdk.crt.http.HttpHeader', - 'software.amazon.awssdk.crt.http.HttpMonitoringOptions', - 'software.amazon.awssdk.crt.http.HttpProxyOptions', - 'software.amazon.awssdk.crt.http.HttpRequest', - 'software.amazon.awssdk.crt.http.HttpRequestBodyStream', - 'software.amazon.awssdk.crt.io.ClientBootstrap', - 'software.amazon.awssdk.crt.io.ExponentialBackoffRetryOptions', - 'software.amazon.awssdk.crt.io.StandardRetryOptions', - 'software.amazon.awssdk.crt.io.TlsCipherPreference', - 'software.amazon.awssdk.crt.io.TlsContext', - 'software.amazon.awssdk.crt.io.TlsContextOptions', - 'software.amazon.awssdk.crt.s3.ChecksumAlgorithm', - 'software.amazon.awssdk.crt.s3.ChecksumConfig', - 'software.amazon.awssdk.crt.s3.ChecksumConfig$ChecksumLocation', - 'software.amazon.awssdk.crt.s3.ResumeToken', - 'software.amazon.awssdk.crt.s3.S3Client', - 'software.amazon.awssdk.crt.s3.S3ClientOptions', - 'software.amazon.awssdk.crt.s3.S3FinishedResponseContext', - 'software.amazon.awssdk.crt.s3.S3MetaRequest', - 'software.amazon.awssdk.crt.s3.S3MetaRequestOptions', - 'software.amazon.awssdk.crt.s3.S3MetaRequestOptions$MetaRequestType', - 'software.amazon.awssdk.crt.s3.S3MetaRequestResponseHandler', 'software.amazon.awssdk.crtcore.CrtConfigurationUtils', 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration', 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration$Builder', diff --git a/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 deleted file mode 100644 index 8d30ad649916b..0000000000000 --- a/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/annotations-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/annotations-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d45f8758c9405 --- /dev/null +++ b/plugins/repository-s3/licenses/annotations-2.30.31.jar.sha1 @@ -0,0 +1 @@ +c5acc1da9567290302d80ffa1633785afa4ce630 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 deleted file mode 100644 index e7ae36581925c..0000000000000 --- a/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/apache-client-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/apache-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..97331cbda2c1b --- /dev/null +++ b/plugins/repository-s3/licenses/apache-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d1c602dba702782a0afec0a08c919322693a3bf8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 deleted file mode 100644 index e4c1b29cea894..0000000000000 --- a/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/auth-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..c1e199ca02fc8 --- /dev/null +++ b/plugins/repository-s3/licenses/auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +8887962b04ce5f1a9f46d44acd806949b17082da \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 deleted file mode 100644 index d42a15c4da413..0000000000000 --- a/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-core-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/aws-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..16050fd1d8c6d --- /dev/null +++ b/plugins/repository-s3/licenses/aws-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5016fadbd7146171b4afe09eb0675b710b0f2d12 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-crt-0.35.0.jar.sha1 b/plugins/repository-s3/licenses/aws-crt-0.35.0.jar.sha1 new file mode 100644 index 0000000000000..1097f5bb4d814 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-crt-0.35.0.jar.sha1 @@ -0,0 +1 @@ +33041403e1a9dd94f40330206eda5ffc22ee185c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-crt-LICENSE.txt b/plugins/repository-s3/licenses/aws-crt-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-crt-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/aws-crt-NOTICE.txt b/plugins/repository-s3/licenses/aws-crt-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/aws-crt-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 deleted file mode 100644 index ee08d240fbfba..0000000000000 --- a/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-json-protocol-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/aws-json-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..bfc742d8687d1 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-json-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4600659276f84e114c1fabeb1478911c581a7739 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 deleted file mode 100644 index 9b19f570d56fb..0000000000000 --- a/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-query-protocol-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/aws-query-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9508295147c96 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-query-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +61596c0cb577a4a6c438a5a7ee0391d2d825b3fe \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 deleted file mode 100644 index 50940d73f4f7b..0000000000000 --- a/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b78a1182a9cf3cccf416cc5a441d08174b08682d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-xml-protocol-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/aws-xml-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..79a09fa635a20 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-xml-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +ad1620b4e221840e2215348a296cc762c23a59c3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/checksums-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/checksums-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4447b86f6e872 --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-2.30.31.jar.sha1 @@ -0,0 +1 @@ +6d00287bc0ceb013dd5c74f1c4eb296ae61b34d4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/checksums-LICENSE.txt b/plugins/repository-s3/licenses/checksums-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-s3/licenses/checksums-NOTICE.txt b/plugins/repository-s3/licenses/checksums-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/repository-s3/licenses/checksums-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/checksums-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..078cab150c5ad --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b5a5b0a39403acf41c21fd16cd11c7c8d887601b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/checksums-spi-LICENSE.txt b/plugins/repository-s3/licenses/checksums-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/checksums-spi-NOTICE.txt b/plugins/repository-s3/licenses/checksums-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/checksums-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 deleted file mode 100644 index 16f9db1fd6327..0000000000000 --- a/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/repository-s3/licenses/endpoints-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/endpoints-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4dbc884c3da6f --- /dev/null +++ b/plugins/repository-s3/licenses/endpoints-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +0734f4b9c68f19201896dd47639035b4e0a7964d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-auth-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/http-auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..79893fb4fbf58 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b7baeb158b0af0e400d89a32595c9127db2bbb6e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-auth-LICENSE.txt b/plugins/repository-s3/licenses/http-auth-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/http-auth-NOTICE.txt b/plugins/repository-s3/licenses/http-auth-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/http-auth-aws-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/http-auth-aws-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d190c6ca52e98 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-aws-2.30.31.jar.sha1 @@ -0,0 +1 @@ +f2a7d383158746c82b0f41b021e0da23a2597b35 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-auth-aws-LICENSE.txt b/plugins/repository-s3/licenses/http-auth-aws-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-aws-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/http-auth-aws-NOTICE.txt b/plugins/repository-s3/licenses/http-auth-aws-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-aws-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/repository-s3/licenses/http-auth-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/http-auth-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..491ffe4dd0584 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +513519f79635441d5205fc31d56c2e0d5826d27f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-auth-spi-LICENSE.txt b/plugins/repository-s3/licenses/http-auth-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-s3/licenses/http-auth-spi-NOTICE.txt b/plugins/repository-s3/licenses/http-auth-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/http-auth-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 deleted file mode 100644 index 0662e15b1f3e6..0000000000000 --- a/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-client-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/http-client-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d86fa139f535c --- /dev/null +++ b/plugins/repository-s3/licenses/http-client-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5fa894c333793b7481aa03aa87512b20e11b057d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/identity-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/identity-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9eeab9ad13dba --- /dev/null +++ b/plugins/repository-s3/licenses/identity-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +46da74ac074b176c25fba07c6541737422622c1d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/identity-spi-LICENSE.txt b/plugins/repository-s3/licenses/identity-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/identity-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/identity-spi-NOTICE.txt b/plugins/repository-s3/licenses/identity-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/identity-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 deleted file mode 100644 index 7011f8c3e6c78..0000000000000 --- a/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/json-utils-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/json-utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..5019f6d48fa0a --- /dev/null +++ b/plugins/repository-s3/licenses/json-utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7f0ef4b49299df2fd39f92113d94524729c61032 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 deleted file mode 100644 index bbd88bb9e1b0c..0000000000000 --- a/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/metrics-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/metrics-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..69ab3ec6f79ff --- /dev/null +++ b/plugins/repository-s3/licenses/metrics-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +57a979cbc99d0bf4113d96aaf4f453303a015966 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 deleted file mode 100644 index 4ae8b2ec5a23c..0000000000000 --- a/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -29195a65eeea36cf1960d1939bca6586d5842dad \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-nio-client-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/netty-nio-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..f49d74cc59e37 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-nio-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +a7226fc3811c7a071e44a33273e081f212e581e3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 deleted file mode 100644 index 425ce9b92f9f2..0000000000000 --- a/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/profiles-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/profiles-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..6d4d2a1ac8d65 --- /dev/null +++ b/plugins/repository-s3/licenses/profiles-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d6d2d5788695972140dfe8b012ea7ccd97b82eef \ No newline at end of file diff --git a/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 deleted file mode 100644 index 8de58699d8d82..0000000000000 --- a/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/protocol-core-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/protocol-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..caae2a4302976 --- /dev/null +++ b/plugins/repository-s3/licenses/protocol-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ 
+ee17b25525aee497b6d520c8e499f39de7204fbc \ No newline at end of file diff --git a/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 deleted file mode 100644 index 266bc76ad6f77..0000000000000 --- a/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/regions-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/regions-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..8e9876686a144 --- /dev/null +++ b/plugins/repository-s3/licenses/regions-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7ce1df66496dcf9b124edb78ab9675e1e7d5c427 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/retries-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/retries-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..98b46e3439ac7 --- /dev/null +++ b/plugins/repository-s3/licenses/retries-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b490f67c9d3f000ae40928d9aa3c9debceac0966 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/retries-LICENSE.txt b/plugins/repository-s3/licenses/retries-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/retries-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/retries-NOTICE.txt b/plugins/repository-s3/licenses/retries-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/retries-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/retries-spi-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/retries-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..854e3d7e4aebf --- /dev/null +++ b/plugins/repository-s3/licenses/retries-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4d9166189594243f88045fbf0c871a81e3914c0b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/retries-spi-LICENSE.txt b/plugins/repository-s3/licenses/retries-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/retries-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/retries-spi-NOTICE.txt b/plugins/repository-s3/licenses/retries-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/repository-s3/licenses/retries-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 deleted file mode 100644 index 7125793759db5..0000000000000 --- a/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a37f591abd11a3f848f091f1724825741daaeb2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/s3-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/s3-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..eb9aa9d13fe83 --- /dev/null +++ b/plugins/repository-s3/licenses/s3-2.30.31.jar.sha1 @@ -0,0 +1 @@ +958f263cf6b7e2ce6eb453627d57debd7fdd449b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 deleted file mode 100644 index 9eca40e6b9a9a..0000000000000 --- a/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sdk-core-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/sdk-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..ee3d7e3bff68d --- /dev/null +++ b/plugins/repository-s3/licenses/sdk-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b95c07d4796105c2e61c4c6ab60e3189886b2787 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 deleted file mode 100644 index cb73b19e14fcf..0000000000000 --- a/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52b92753b944d3e1b8c6814bc9d6c93119ca6421 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/signer-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/signer-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..a03a173e4e2ad --- /dev/null +++ b/plugins/repository-s3/licenses/signer-2.30.31.jar.sha1 @@ -0,0 +1 @@ +e3d07951f347b85e5129cc31ed613a70f9259cac \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 deleted file mode 100644 index 1f40b6dcd8417..0000000000000 --- a/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7415d850a4aea10935f84766065dd76a3d327a54 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sts-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/sts-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..3752d0003bc8d --- /dev/null +++ b/plugins/repository-s3/licenses/sts-2.30.31.jar.sha1 @@ -0,0 +1 @@ +fb85a774f8e7265ed4bc4255e6df8a80ee8cf4b9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 deleted file mode 100644 index c9c3d4dc53505..0000000000000 --- a/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/third-party-jackson-core-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/third-party-jackson-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..a07a8eda62447 --- /dev/null +++ b/plugins/repository-s3/licenses/third-party-jackson-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +100d8022939bd59cd7d2461bd4fb0fd9fa028499 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 deleted file mode 100644 index b91a3b3047570..0000000000000 --- a/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/utils-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..184ff1cc5f9ce --- /dev/null +++ b/plugins/repository-s3/licenses/utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +3340adacb87ff28f90a039d57c81311b296db89e \ No newline at end of file From 82bbdfb71f127367ba1c6f36d7a1e68f4fa182cd Mon Sep 17 00:00:00 2001 From: Shailesh Singh Date: Wed, 5 Mar 2025 07:17:55 +0530 Subject: [PATCH 049/550] Fix Bug - Handle unsigned long in sorting order assertion of LongHashSet (#17207) * Fix Bug - handle unsigned long in assertion of LongHashSet Signed-off-by: Shailesh Singh * renamed TestDocValuesUnsignedLongHashSet.java to DocValuesUnsignedLongHashSetTests.java Signed-off-by: Shailesh Singh * Update server/src/main/java/org/opensearch/lucene/util/UnsignedLongHashSet.java Co-authored-by: Andriy Redko Signed-off-by: Shailesh Singh --------- Signed-off-by: Shailesh Singh Signed-off-by: Shailesh Singh Co-authored-by: Shailesh Singh Co-authored-by: Andriy Redko --- CHANGELOG.md | 1 + .../SortedUnsignedLongDocValuesSetQuery.java | 6 +- .../lucene/util/UnsignedLongHashSet.java | 139 +++++++++++++++++ .../DocValuesUnsignedLongHashSetTests.java | 141 ++++++++++++++++++ 4 files changed, 284 insertions(+), 3 deletions(-) create mode 100644 server/src/main/java/org/opensearch/lucene/util/UnsignedLongHashSet.java create mode 100644 server/src/test/java/org/opensearch/lucene/util/DocValuesUnsignedLongHashSetTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 091e832e01cd1..f40100aa2650e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) - Added ability to retrieve value from DocValues in a flat_object filed([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) - Improve performace of NumericTermAggregation by avoiding unnecessary sorting([#17252](https://github.com/opensearch-project/OpenSearch/pull/17252)) +- Fix Bug - Handle unsigned long in sorting order assertion of LongHashSet ([#17207](https://github.com/opensearch-project/OpenSearch/pull/17207)) - Implemented computation of segment replication stats at shard level ([#17055](https://github.com/opensearch-project/OpenSearch/pull/17055)) - [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) diff --git a/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java b/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java index 932f523ae071e..3d677aa6a8dfe 100644 --- a/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java +++ b/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java @@ -25,7 +25,7 @@ import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; -import 
org.opensearch.lucene.util.LongHashSet; +import org.opensearch.lucene.util.UnsignedLongHashSet; import java.io.IOException; import java.math.BigInteger; @@ -40,12 +40,12 @@ public abstract class SortedUnsignedLongDocValuesSetQuery extends Query { private final String field; - private final LongHashSet numbers; + private final UnsignedLongHashSet numbers; SortedUnsignedLongDocValuesSetQuery(String field, BigInteger[] numbers) { this.field = Objects.requireNonNull(field); Arrays.sort(numbers); - this.numbers = new LongHashSet(Arrays.stream(numbers).mapToLong(n -> n.longValue()).toArray()); + this.numbers = new UnsignedLongHashSet(Arrays.stream(numbers).mapToLong(n -> n.longValue()).toArray()); } @Override diff --git a/server/src/main/java/org/opensearch/lucene/util/UnsignedLongHashSet.java b/server/src/main/java/org/opensearch/lucene/util/UnsignedLongHashSet.java new file mode 100644 index 0000000000000..ab9d4c4c91afc --- /dev/null +++ b/server/src/main/java/org/opensearch/lucene/util/UnsignedLongHashSet.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.lucene.util; + +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.apache.lucene.util.packed.PackedInts; +import org.opensearch.common.Numbers; + +import java.util.Arrays; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +/** Set of unsigned-longs, optimized for docvalues usage */ +public final class UnsignedLongHashSet implements Accountable { + private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(UnsignedLongHashSet.class); + + private static final long MISSING = Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG; + + final long[] table; + final int mask; + final boolean hasMissingValue; + final int size; + /** minimum value in the set, or Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG for an empty set */ + public final long minValue; + /** maximum value in the set, or Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG for an empty set */ + public final long maxValue; + + /** Construct a set. Values must be in sorted order. */ + public UnsignedLongHashSet(long[] values) { + int tableSize = Math.toIntExact(values.length * 3L / 2); + tableSize = 1 << PackedInts.bitsRequired(tableSize); // make it a power of 2 + assert tableSize >= values.length * 3L / 2; + table = new long[tableSize]; + Arrays.fill(table, MISSING); + mask = tableSize - 1; + boolean hasMissingValue = false; + int size = 0; + long previousValue = Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG; // for assert + for (long value : values) { + if (value == MISSING) { + size += hasMissingValue ? 0 : 1; + hasMissingValue = true; + } else if (add(value)) { + ++size; + } + assert Long.compareUnsigned(value, previousValue) >= 0 : " values must be provided in sorted order"; + previousValue = value; + } + this.hasMissingValue = hasMissingValue; + this.size = size; + this.minValue = values.length == 0 ? Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG : values[0]; + this.maxValue = values.length == 0 ? 
Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG : values[values.length - 1]; + } + + private boolean add(long l) { + assert l != MISSING; + final int slot = Long.hashCode(l) & mask; + for (int i = slot;; i = (i + 1) & mask) { + if (table[i] == MISSING) { + table[i] = l; + return true; + } else if (table[i] == l) { + // already added + return false; + } + } + } + + /** + * check for membership in the set. + * + *
* <p>
You should use {@link #minValue} and {@link #maxValue} to guide/terminate iteration before + * calling this. + */ + public boolean contains(long l) { + if (l == MISSING) { + return hasMissingValue; + } + final int slot = Long.hashCode(l) & mask; + for (int i = slot;; i = (i + 1) & mask) { + if (table[i] == MISSING) { + return false; + } else if (table[i] == l) { + return true; + } + } + } + + /** returns a stream of all values contained in this set */ + public LongStream stream() { + LongStream stream = Arrays.stream(table).filter(v -> v != MISSING); + if (hasMissingValue) { + stream = LongStream.concat(LongStream.of(MISSING), stream); + } + return stream; + } + + @Override + public int hashCode() { + return Objects.hash(size, minValue, maxValue, mask, hasMissingValue, Arrays.hashCode(table)); + } + + @Override + public boolean equals(Object obj) { + if (obj != null && obj instanceof UnsignedLongHashSet) { + UnsignedLongHashSet that = (UnsignedLongHashSet) obj; + return size == that.size + && minValue == that.minValue + && maxValue == that.maxValue + && mask == that.mask + && hasMissingValue == that.hasMissingValue + && Arrays.equals(table, that.table); + } + return false; + } + + @Override + public String toString() { + return stream().mapToObj(Long::toUnsignedString).collect(Collectors.joining(", ", "[", "]")); + } + + /** number of elements in the set */ + public int size() { + return size; + } + + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES + RamUsageEstimator.sizeOfObject(table); + } +} diff --git a/server/src/test/java/org/opensearch/lucene/util/DocValuesUnsignedLongHashSetTests.java b/server/src/test/java/org/opensearch/lucene/util/DocValuesUnsignedLongHashSetTests.java new file mode 100644 index 0000000000000..a22ae031b97b5 --- /dev/null +++ b/server/src/test/java/org/opensearch/lucene/util/DocValuesUnsignedLongHashSetTests.java @@ -0,0 +1,141 @@ +package org.opensearch.lucene.util; + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.common.Numbers; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +public class DocValuesUnsignedLongHashSetTests extends LuceneTestCase { + + private void assertEquals(Set set1, UnsignedLongHashSet unsignedLongHashSet) { + assertEquals(set1.size(), unsignedLongHashSet.size()); + + Set set2 = unsignedLongHashSet.stream().boxed().collect(Collectors.toSet()); + LuceneTestCase.assertEquals(set1, set2); + + if (set1.isEmpty() == false) { + Set set3 = new HashSet<>(set1); + long removed = set3.iterator().next(); + while (true) { + long next = random().nextLong(); + if (next != removed && set3.add(next)) { + assertFalse(unsignedLongHashSet.contains(next)); + break; + } + } + assertNotEquals(set3, unsignedLongHashSet); + } + + assertTrue(set1.stream().allMatch(unsignedLongHashSet::contains)); + } + + private void assertNotEquals(Set set1, UnsignedLongHashSet unsignedLongHashSet) { + Set set2 = unsignedLongHashSet.stream().boxed().collect(Collectors.toSet()); + + LuceneTestCase.assertNotEquals(set1, set2); + + UnsignedLongHashSet set3 = new UnsignedLongHashSet( + set1.stream().sorted(Long::compareUnsigned).mapToLong(Long::longValue).toArray() + ); + + LuceneTestCase.assertNotEquals(set2, set3.stream().boxed().collect(Collectors.toSet())); + + assertFalse(set1.stream().allMatch(unsignedLongHashSet::contains)); + } + + public void testEmpty() { + Set set1 = new HashSet<>(); + UnsignedLongHashSet set2 = new UnsignedLongHashSet(new long[] {}); + assertEquals(0, set2.size()); + assertEquals(Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG, set2.minValue); + assertEquals(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, set2.maxValue); + assertEquals(set1, set2); + } + + public void testOneValue() { + Set set1 = new HashSet<>(Arrays.asList(42L)); + UnsignedLongHashSet set2 = new UnsignedLongHashSet(new long[] { 42L }); + assertEquals(1, set2.size()); + assertEquals(42L, set2.minValue); + assertEquals(42L, set2.maxValue); + assertEquals(set1, set2); + + set1 = new HashSet<>(Arrays.asList(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG)); + set2 = new UnsignedLongHashSet(new long[] { Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG }); + assertEquals(1, set2.size()); + assertEquals(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, set2.minValue); + assertEquals(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, set2.maxValue); + assertEquals(set1, set2); + } + + public void testTwoValues() { + Set set1 = new HashSet<>(Arrays.asList(42L, Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG)); + UnsignedLongHashSet set2 = new UnsignedLongHashSet(new long[] { 42L, Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG }); + assertEquals(2, set2.size()); + assertEquals(42, set2.minValue); + assertEquals(Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG, set2.maxValue); + assertEquals(set1, set2); + + set1 = new HashSet<>(Arrays.asList(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, 42L)); + set2 = new UnsignedLongHashSet(new long[] { Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, 42L }); + assertEquals(2, set2.size()); + assertEquals(Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG, set2.minValue); + assertEquals(42, set2.maxValue); + assertEquals(set1, set2); + } + + public void testSameValue() { + UnsignedLongHashSet set2 = new UnsignedLongHashSet(new long[] { 42L, 42L }); + assertEquals(1, set2.size()); + assertEquals(42L, set2.minValue); + assertEquals(42L, set2.maxValue); + } + + public void testSameMissingPlaceholder() { + 
UnsignedLongHashSet set2 = new UnsignedLongHashSet(new long[] { Long.MIN_VALUE, Long.MIN_VALUE }); + assertEquals(1, set2.size()); + assertEquals(Long.MIN_VALUE, set2.minValue); + assertEquals(Long.MIN_VALUE, set2.maxValue); + } + + public void testRandom() { + final int iters = atLeast(10); + for (int iter = 0; iter < iters; ++iter) { + long[] values = new long[random().nextInt(1 << random().nextInt(16))]; + for (int i = 0; i < values.length; ++i) { + if (i == 0 || random().nextInt(10) < 9) { + values[i] = random().nextLong(); + } else { + values[i] = values[random().nextInt(i)]; + } + } + if (values.length > 0 && random().nextBoolean()) { + values[values.length / 2] = Long.MIN_VALUE; + } + Set set1 = LongStream.of(values).boxed().collect(Collectors.toSet()); + Long[] longObjects = Arrays.stream(values).boxed().toArray(Long[]::new); + // Sort using compareUnsigned + Arrays.sort(longObjects, Long::compareUnsigned); + + long[] arr = new long[values.length]; + // Convert back to long[] + for (int i = 0; i < arr.length; i++) { + arr[i] = longObjects[i]; + } + UnsignedLongHashSet set2 = new UnsignedLongHashSet(arr); + assertEquals(set1, set2); + } + } +} From 7330a88ebc5899c302422a65bc0169a83a880aa6 Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Tue, 4 Mar 2025 22:25:45 -0500 Subject: [PATCH 050/550] Add Windows 2025+Java 21 combination to build matrix (#17508) Signed-off-by: Daniel Widdis --- .github/workflows/precommit.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index 7e20912fe1f60..a7b9496481a5d 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -9,6 +9,10 @@ jobs: matrix: java: [ 21, 23 ] os: [ubuntu-latest, windows-latest, macos-latest, macos-13, ubuntu-24.04-arm] + include: + - java: 21 + os: 'windows-2025' + experimental: true steps: - uses: actions/checkout@v4 - name: Set up JDK ${{ matrix.java }} @@ -18,6 +22,7 @@ jobs: distribution: temurin cache: gradle - name: Run Gradle (precommit) + continue-on-error: ${{ matrix.experimental }} shell: bash run: | ./gradlew javadoc precommit --parallel From cc82be9cda7414f919cf64a52880135c2f4937f0 Mon Sep 17 00:00:00 2001 From: Peter Zhu Date: Wed, 5 Mar 2025 16:27:14 -0500 Subject: [PATCH 051/550] Switch main/3.x to use JDK21 LTS version (#17515) * Switch main/3.x to use JDK21 LTS version Signed-off-by: Peter Zhu * Update changelog 3.0 Signed-off-by: Peter Zhu --------- Signed-off-by: Peter Zhu --- CHANGELOG-3.0.md | 1 + .../java/org/opensearch/gradle/test/DistroTestPlugin.java | 4 ++-- gradle/libs.versions.toml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 7e82efd268007..5128d2f9ef3a0 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -29,6 +29,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) - Bump Apache HttpCore5/HttpClient5 dependencies from 5.2.5/5.3.1 to 5.3.1/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757)) - Bumps `jetty` version from 9.4.55.v20240627 to 9.4.57.v20241219 +- Switch main/3.x to use JDK21 LTS version ([#17515](https://github.com/opensearch-project/OpenSearch/pull/17515)) ### Changed - Changed locale provider from COMPAT to CLDR ([#14345](https://github.com/opensearch-project/OpenSearch/pull/14345)) diff --git 
a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index 654af7da65662..888cd8d4bf5b5 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -77,9 +77,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin { - private static final String SYSTEM_JDK_VERSION = "23.0.2+7"; + private static final String SYSTEM_JDK_VERSION = "21.0.6+7"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "23.0.2+7"; + private static final String GRADLE_JDK_VERSION = "21.0.6+7"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 90518ca71ec53..8d8c49e531e77 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -3,7 +3,7 @@ opensearch = "3.0.0" lucene = "10.1.0" bundled_jdk_vendor = "adoptium" -bundled_jdk = "23.0.2+7" +bundled_jdk = "21.0.6+7" # optional dependencies spatial4j = "0.7" From 02055cc1388dc0af73572f812cac803d2c697b56 Mon Sep 17 00:00:00 2001 From: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Date: Thu, 6 Mar 2025 04:46:22 +0530 Subject: [PATCH 052/550] Add sample integ tests for latest systemd unit file (#17410) * Add integration tests for systemd Signed-off-by: Rajat Gupta * Fix indentation Signed-off-by: Rajat Gupta * Remove unit file mount Signed-off-by: Rajat Gupta * Use centos image Signed-off-by: Rajat Gupta * Change method name Signed-off-by: Rajat Gupta * Add sample systemd integ tests to verify behavior Signed-off-by: Rajat Gupta * Update su with sudo probably need to have a privileged mode Signed-off-by: Peter Zhu * Additional tests Signed-off-by: Rajat Gupta * Wrap commands with su -c Signed-off-by: Rajat Gupta * Add sudo Signed-off-by: Rajat Gupta * Remove sudo for test process exit Signed-off-by: Rajat Gupta * Minor fixes Signed-off-by: Rajat Gupta * Fixed script string Signed-off-by: Rajat Gupta * Remove redundant code Signed-off-by: Rajat Gupta * Add terminate script Signed-off-by: Rajat Gupta * Modified terminate script Signed-off-by: Rajat Gupta * Add Changelog-3.0 entry Signed-off-by: Rajat Gupta * Fix for gradle precommit workflow Signed-off-by: Rajat Gupta * Fix testing conventions gradle precommit check Signed-off-by: Rajat Gupta * Fix imports Signed-off-by: Rajat Gupta * Only run as part of build integTest, remove gradle check Signed-off-by: Rajat Gupta * Remove bash Signed-off-by: Rajat Gupta * add sudo for systemctl command Signed-off-by: Rajat Gupta * Remove OpenSearchIntegTest class Signed-off-by: Rajat Gupta * Rename test file Signed-off-by: Rajat Gupta * Add test script Signed-off-by: Rajat Gupta * Extend LuceneTestCase class Signed-off-by: Rajat Gupta * Remove test bash script Signed-off-by: Rajat Gupta * Modify build.gradle Signed-off-by: Rajat Gupta --------- Signed-off-by: Rajat Gupta Signed-off-by: Peter Zhu Signed-off-by: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Co-authored-by: Rajat Gupta Co-authored-by: Peter Zhu --- CHANGELOG-3.0.md | 2 + qa/systemd-test/build.gradle | 5 + .../systemdinteg/SystemdIntegTests.java | 177 ++++++++++++++++++ .../src/test/resources/scripts/terminate.sh | 12 ++ 4 files changed, 196 insertions(+) create mode 100644 
qa/systemd-test/build.gradle create mode 100644 qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java create mode 100755 qa/systemd-test/src/test/resources/scripts/terminate.sh diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 5128d2f9ef3a0..99b636822fb72 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -21,10 +21,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) - Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) - Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) +- Added integ tests for systemd configs ([#17410](https://github.com/opensearch-project/OpenSearch/pull/17410)) - Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) - [Star Tree] [Search] Resolving keyword & numeric bucket aggregation with metric aggregation using star-tree ([#17165](https://github.com/opensearch-project/OpenSearch/pull/17165)) - Added error handling support for the pull-based ingestion ([#17427](https://github.com/opensearch-project/OpenSearch/pull/17427)) + ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) - Bump Apache HttpCore5/HttpClient5 dependencies from 5.2.5/5.3.1 to 5.3.1/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757)) diff --git a/qa/systemd-test/build.gradle b/qa/systemd-test/build.gradle new file mode 100644 index 0000000000000..7db5ddbf9ff12 --- /dev/null +++ b/qa/systemd-test/build.gradle @@ -0,0 +1,5 @@ +apply plugin: 'opensearch.standalone-rest-test' + +tasks.register("integTest", Test){ + include "**/*IntegTests.class" +} diff --git a/qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java b/qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java new file mode 100644 index 0000000000000..2beadd9445412 --- /dev/null +++ b/qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java @@ -0,0 +1,177 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. 
+*/ + +package org.opensearch.systemdinteg; +import org.apache.lucene.tests.util.LuceneTestCase; + +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpStatus; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.io.BufferedReader; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.PosixFilePermissions; +import java.util.Locale; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + + +public class SystemdIntegTests extends LuceneTestCase { + + private static String opensearchPid; + + @BeforeClass + public static void setup() throws IOException, InterruptedException { + opensearchPid = getOpenSearchPid(); + + if (opensearchPid.isEmpty()) { + throw new RuntimeException("Failed to find OpenSearch process ID"); + } + } + + private static String getOpenSearchPid() throws IOException, InterruptedException { + String command = "systemctl show --property=MainPID opensearch"; + String output = executeCommand(command, "Failed to get OpenSearch PID"); + return output.replace("MainPID=", "").trim(); + } + + private boolean checkPathExists(String path) throws IOException, InterruptedException { + String command = String.format(Locale.ROOT, "test -e %s && echo true || echo false", path); + return Boolean.parseBoolean(executeCommand(command, "Failed to check path existence")); + } + + private boolean checkPathReadable(String path) throws IOException, InterruptedException { + String command = String.format(Locale.ROOT, "sudo su opensearch -s /bin/sh -c 'test -r %s && echo true || echo false'", path); + return Boolean.parseBoolean(executeCommand(command, "Failed to check read permission")); + } + + private boolean checkPathWritable(String path) throws IOException, InterruptedException { + String command = String.format(Locale.ROOT, "sudo su opensearch -s /bin/sh -c 'test -w %s && echo true || echo false'", path); + return Boolean.parseBoolean(executeCommand(command, "Failed to check write permission")); + } + + private String getPathOwnership(String path) throws IOException, InterruptedException { + String command = String.format(Locale.ROOT, "stat -c '%%U:%%G' %s", path); + return executeCommand(command, "Failed to get path ownership"); + } + + private static String executeCommand(String command, String errorMessage) throws IOException, InterruptedException { + Process process = Runtime.getRuntime().exec(new String[]{"bash", "-c", command}); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { + StringBuilder output = new StringBuilder(); + String line; + while ((line = reader.readLine()) != null) { + output.append(line).append("\n"); + } + if (process.waitFor() != 0) { + throw new RuntimeException(errorMessage); + } + return output.toString().trim(); + } + } + + public void testReadOnlyPaths() throws IOException, InterruptedException { + String[] readOnlyPaths = { + "/etc/os-release", "/usr/lib/os-release", "/etc/system-release", + "/proc/self/mountinfo", "/proc/diskstats", + "/proc/self/cgroup", "/sys/fs/cgroup/cpu", 
"/sys/fs/cgroup/cpu/-", + "/sys/fs/cgroup/cpuacct", "/sys/fs/cgroup/cpuacct/-", + "/sys/fs/cgroup/memory", "/sys/fs/cgroup/memory/-" + }; + + for (String path : readOnlyPaths) { + if (checkPathExists(path)) { + assertTrue("Path should be readable: " + path, checkPathReadable(path)); + assertFalse("Path should not be writable: " + path, checkPathWritable(path)); + } + } + } + + public void testReadWritePaths() throws IOException, InterruptedException { + String[] readWritePaths = {"/var/log/opensearch", "/var/lib/opensearch"}; + for (String path : readWritePaths) { + assertTrue("Path should exist: " + path, checkPathExists(path)); + assertTrue("Path should be readable: " + path, checkPathReadable(path)); + assertTrue("Path should be writable: " + path, checkPathWritable(path)); + assertEquals("Path should be owned by opensearch:opensearch", "opensearch:opensearch", getPathOwnership(path)); + } + } + + public void testMaxProcesses() throws IOException, InterruptedException { + String limits = executeCommand("sudo su -c 'cat /proc/" + opensearchPid + "/limits'", "Failed to read process limits"); + assertTrue("Max processes limit should be 4096 or unlimited", + limits.contains("Max processes 4096 4096") || + limits.contains("Max processes unlimited unlimited")); + } + + public void testFileDescriptorLimit() throws IOException, InterruptedException { + String limits = executeCommand("sudo su -c 'cat /proc/" + opensearchPid + "/limits'", "Failed to read process limits"); + assertTrue("File descriptor limit should be at least 65535", + limits.contains("Max open files 65535 65535") || + limits.contains("Max open files unlimited unlimited")); + } + + public void testSystemCallFilter() throws IOException, InterruptedException { + // Check if Seccomp is enabled + String seccomp = executeCommand("sudo su -c 'grep Seccomp /proc/" + opensearchPid + "/status'", "Failed to read Seccomp status"); + assertFalse("Seccomp should be enabled", seccomp.contains("0")); + + // Test specific system calls that should be blocked + String rebootResult = executeCommand("sudo su opensearch -c 'kill -s SIGHUP 1' 2>&1 || echo 'Operation not permitted'", "Failed to test reboot system call"); + assertTrue("Reboot system call should be blocked", rebootResult.contains("Operation not permitted")); + + String swapResult = executeCommand("sudo su opensearch -c 'swapon -a' 2>&1 || echo 'Operation not permitted'", "Failed to test swap system call"); + assertTrue("Swap system call should be blocked", swapResult.contains("Operation not permitted")); + } + + public void testOpenSearchProcessCannotExit() throws IOException, InterruptedException { + + String scriptPath; + try { + scriptPath = SystemdIntegTests.class.getResource("/scripts/terminate.sh").toURI().getPath(); + } catch (URISyntaxException e) { + throw new RuntimeException("Failed to convert URL to URI", e); + } + + if (scriptPath == null) { + throw new IllegalStateException("Could not find terminate.sh script in resources"); + } + ProcessBuilder processBuilder = new ProcessBuilder(scriptPath, opensearchPid); + Process process = processBuilder.start(); + + // Wait a moment for any potential termination to take effect + Thread.sleep(2000); + + // Verify the OpenSearch service status + String serviceStatus = executeCommand( + "systemctl is-active opensearch", + "Failed to check OpenSearch service status" + ); + + assertEquals("OpenSearch service should be active", "active", serviceStatus.trim()); + } + +} diff --git a/qa/systemd-test/src/test/resources/scripts/terminate.sh 
b/qa/systemd-test/src/test/resources/scripts/terminate.sh new file mode 100755 index 0000000000000..21ea62a475e70 --- /dev/null +++ b/qa/systemd-test/src/test/resources/scripts/terminate.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +if [ $# -ne 1 ]; then + echo "Usage: $0 " + exit 1 +fi + +if kill -15 $1 2>/dev/null; then + echo "SIGTERM signal sent to process $1" +else + echo "Failed to send SIGTERM to process $1" +fi \ No newline at end of file From 3966ed93beb2ff13854d2a6e1ff51dfa32295830 Mon Sep 17 00:00:00 2001 From: Eric Pugh Date: Wed, 5 Mar 2025 19:08:12 -0500 Subject: [PATCH 053/550] Fix small typo in DEVELOPER_GUIDE.md (#17512) Signed-off-by: Eric Pugh --- DEVELOPER_GUIDE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index b40f5d9b3f21a..e7ad1d8120ea6 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -127,7 +127,7 @@ All distributions built will be under `distributions/archives`. #### Generated Code OpenSearch uses code generators like [Protobuf](https://protobuf.dev/). -OpenSearch build system already takes a dependency of generating code from protobuf, incase you run into compilation errors, run: +OpenSearch build system already takes a dependency of generating code from protobuf, if you run into compilation errors, run: ``` ./gradlew generateProto From 342c6458f3ced88aa351e8807364f18b35c272ec Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Thu, 6 Mar 2025 19:04:17 +0530 Subject: [PATCH 054/550] Fix red index on close for remote enabled clusters (#17521) Signed-off-by: Ashish Singh --- .../org/opensearch/index/engine/ReadOnlyEngine.java | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index 1852f2fa92b74..5d42a7b830de0 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -40,6 +40,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -166,7 +167,7 @@ public ReadOnlyEngine( } protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStats) { - if (requireCompleteHistory == false) { + if (requireCompleteHistory == false || isClosedRemoteIndex()) { return; } // Before 3.0 the global checkpoint is not known and up to date when the engine is created after @@ -187,6 +188,14 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStat } } + /** + * Returns true if this is a remote store index (included if migrating as well) which is closed. + */ + private boolean isClosedRemoteIndex() { + return this.engineConfig.getIndexSettings().isAssignedOnRemoteNode() + && this.engineConfig.getIndexSettings().getIndexMetadata().getState() == IndexMetadata.State.CLOSE; + } + protected boolean assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) { assert maxSeqNo == globalCheckpoint : "max seq. no. 
[" + maxSeqNo + "] does not match [" + globalCheckpoint + "]"; return true; From 73882054afcdb74244c07c5be1f54a629ffd0bc2 Mon Sep 17 00:00:00 2001 From: Divyansh Pandey <98746046+pandeydivyansh1803@users.noreply.github.com> Date: Thu, 6 Mar 2025 20:21:20 +0530 Subject: [PATCH 055/550] Update validator for index update request (#17529) Signed-off-by: Divyansh Pandey Co-authored-by: Divyansh Pandey --- ...AllocationDeciderRemoteStoreEnabledIT.java | 71 +++++++++++++++++++ .../metadata/MetadataCreateIndexService.java | 3 +- .../MetadataUpdateSettingsService.java | 32 ++++++++- 3 files changed, 103 insertions(+), 3 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java index 401db7790de92..72c849cb395af 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java @@ -8,6 +8,8 @@ package org.opensearch.cluster.routing.allocation.decider; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; @@ -99,6 +101,75 @@ public void testIndexPrimaryShardLimit() throws Exception { }); } + public void testUpdatingIndexPrimaryShardLimit() throws Exception { + // Create first index with primary shard limit + Settings firstIndexSettings = Settings.builder() + .put(remoteStoreIndexSettings(0, 4)) // 4 shards, 0 replicas + .put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1) + .build(); + + // Create first index + createIndex("test1", firstIndexSettings); + + // Update the index settings to set INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest("test1"); + Settings updatedSettings = Settings.builder().put(INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1).build(); + updateSettingsRequest.settings(updatedSettings); + + AcknowledgedResponse response = client().admin().indices().updateSettings(updateSettingsRequest).actionGet(); + + assertTrue(response.isAcknowledged()); + + // Create second index + createIndex("test2", remoteStoreIndexSettings(0, 4)); + + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + // Check total number of shards (8 total: 4 from each index) + assertEquals("Total shards should be 8", 8, state.getRoutingTable().allShards().size()); + + // Count assigned and unassigned shards for test1 + int test1AssignedShards = 0; + int test1UnassignedShards = 0; + Map nodePrimaryCount = new HashMap<>(); + + // Check test1 shard distribution + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test1")) { + for (ShardRouting shard : shardRouting) { + if (shard.assignedToNode()) { + test1AssignedShards++; + // Count primaries per node for test1 + String nodeId = shard.currentNodeId(); + nodePrimaryCount.merge(nodeId, 1, Integer::sum); + } else { + test1UnassignedShards++; + } + } + } + + // Check test2 shard assignment 
+ int test2UnassignedShards = 0; + for (IndexShardRoutingTable shardRouting : state.routingTable().index("test2")) { + for (ShardRouting shard : shardRouting) { + if (!shard.assignedToNode()) { + test2UnassignedShards++; + } + } + } + + // Assertions + assertEquals("test1 should have 3 assigned shards", 3, test1AssignedShards); + assertEquals("test1 should have 1 unassigned shard", 1, test1UnassignedShards); + assertEquals("test2 should have no unassigned shards", 0, test2UnassignedShards); + + // Verify no node has more than one primary shard of test1 + for (Integer count : nodePrimaryCount.values()) { + assertTrue("No node should have more than 1 primary shard of test1", count <= 1); + } + }); + } + public void testClusterPrimaryShardLimitss() throws Exception { // Update cluster setting to limit primary shards per node updateClusterSetting(CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey(), 1); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index a81fe01f0e7f4..2bdd31b23aee3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1847,7 +1847,8 @@ public static void validateRefreshIntervalSettings(Settings requestSettings, Clu } /** - * Validates {@code index.routing.allocation.total_primary_shards_per_node} is only set for remote store enabled cluster + * Validates the {@code index.routing.allocation.total_primary_shards_per_node} setting during index creation. + * Ensures this setting is only specified for remote store enabled clusters. */ // TODO : Update this check for SegRep to DocRep migration on need basis public static void validateIndexTotalPrimaryShardsPerNodeSetting(Settings indexSettings) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index eb10fd5d04288..469bec7220721 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; @@ -78,12 +79,12 @@ import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; -import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateIndexTotalPrimaryShardsPerNodeSetting; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateOverlap; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings; import static 
org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex; import static org.opensearch.cluster.metadata.MetadataIndexTemplateService.findComponentTemplate; +import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.common.settings.AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX; import static org.opensearch.index.IndexSettings.same; @@ -140,7 +141,7 @@ public void updateSettings( validateRefreshIntervalSettings(normalizedSettings, clusterService.getClusterSettings()); validateTranslogDurabilitySettings(normalizedSettings, clusterService.getClusterSettings(), clusterService.getSettings()); - validateIndexTotalPrimaryShardsPerNodeSetting(normalizedSettings); + validateIndexTotalPrimaryShardsPerNodeSetting(normalizedSettings, clusterService); final int defaultReplicaCount = clusterService.getClusterSettings().get(Metadata.DEFAULT_REPLICA_COUNT_SETTING); Settings.Builder settingsForClosedIndices = Settings.builder(); @@ -549,4 +550,31 @@ private void validateSearchReplicaCountSettings(Settings requestSettings, Index[ } } } + + /** + * Validates the 'index.routing.allocation.total_primary_shards_per_node' setting during index settings update. + * Ensures this setting can only be modified for existing indices in remote store enabled clusters. + */ + public static void validateIndexTotalPrimaryShardsPerNodeSetting(Settings indexSettings, ClusterService clusterService) { + // Get the setting value + int indexPrimaryShardsPerNode = INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.get(indexSettings); + + // If default value (-1), no validation needed + if (indexPrimaryShardsPerNode == -1) { + return; + } + + // Check if remote store is enabled + boolean isRemoteStoreEnabled = clusterService.state() + .nodes() + .getNodes() + .values() + .stream() + .allMatch(DiscoveryNode::isRemoteStoreNode); + if (!isRemoteStoreEnabled) { + throw new IllegalArgumentException( + "Setting [" + INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING.getKey() + "] can only be used with remote store enabled clusters" + ); + } + } } From cb869c06861c63aa338120dcb33d1b3470c54155 Mon Sep 17 00:00:00 2001 From: Sandeep Kumawat <2025sandeepkumawat@gmail.com> Date: Fri, 7 Mar 2025 09:29:39 +0530 Subject: [PATCH 056/550] Writable warm replica replication/recovery (#17390) Signed-off-by: Sandeep Kumawat --- .../replication/SegmentReplicationBaseIT.java | 2 - .../WarmIndexSegmentReplicationIT.java | 1665 +++++++++++++++++ .../SegmentReplicationUsingRemoteStoreIT.java | 2 - .../allocation/IndexMetadataUpdater.java | 3 +- .../allocator/RemoteShardsBalancer.java | 5 +- .../index/engine/NRTReplicationEngine.java | 9 +- .../opensearch/index/shard/IndexShard.java | 6 +- .../index/store/CompositeDirectory.java | 55 +- .../store/remote/utils/FileTypeUtils.java | 4 +- .../store/remote/utils/cache/LRUCache.java | 13 +- .../remote/utils/cache/SegmentedCache.java | 7 +- .../replication/SegmentReplicationTarget.java | 7 + .../index/store/CompositeDirectoryTests.java | 12 +- 13 files changed, 1773 insertions(+), 17 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java index 
8b4913fcb2704..ac2862806c858 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java @@ -49,8 +49,6 @@ import java.util.stream.Collectors; import static java.util.Arrays.asList; -import static org.opensearch.test.OpenSearchIntegTestCase.client; -import static org.opensearch.test.OpenSearchTestCase.assertBusy; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; public class SegmentReplicationBaseIT extends OpenSearchIntegTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java new file mode 100644 index 0000000000000..d7f1c2209f798 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java @@ -0,0 +1,1665 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import com.carrotsearch.randomizedtesting.RandomizedTest; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.BytesRef; +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; +import org.opensearch.action.admin.indices.alias.Alias; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.opensearch.action.get.GetResponse; +import org.opensearch.action.get.MultiGetRequest; +import org.opensearch.action.get.MultiGetResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.termvectors.TermVectorsRequestBuilder; +import org.opensearch.action.termvectors.TermVectorsResponse; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Preference; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; +import 
org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.IndexModule; +import org.opensearch.index.ReplicationStats; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.index.SegmentReplicationPressureService; +import org.opensearch.index.SegmentReplicationShardStats; +import org.opensearch.index.codec.CodecService; +import org.opensearch.index.engine.EngineConfig; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; +import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.node.Node; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.BackgroundIndexer; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; +import org.opensearch.transport.client.Requests; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static java.util.Arrays.asList; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.boolQuery; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.query.QueryBuilders.matchQuery; +import static org.opensearch.index.query.QueryBuilders.rangeQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchHits; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * This class runs Segment Replication Integ test suite with partial locality indices (warm indices). + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +@ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class) +public class WarmIndexSegmentReplicationIT extends SegmentReplicationBaseIT { + + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected Path absolutePath; + + @Before + private void setup() { + internalCluster().startClusterManagerOnlyNode(); + } + + private static String indexOrAlias() { + return randomBoolean() ? 
INDEX_NAME : "alias"; + } + + @Override + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL.name()) + .build(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } + ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) + .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) + .build(); + } + + @Override + protected Settings featureFlagSettings() { + Settings.Builder featureSettings = Settings.builder(); + featureSettings.put(FeatureFlags.TIERED_REMOTE_INDEX, true); + return featureSettings.build(); + } + + @Override + protected boolean addMockIndexStorePlugin() { + return false; + } + + protected boolean warmIndexSegmentReplicationEnabled() { + return true; + } + + @After + public void teardown() throws Exception { + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + for (String nodeName : internalCluster().getNodeNames()) { + FileCache fileCache = internalCluster().getInstance(Node.class, nodeName).fileCache(); + if (fileCache != null) { + fileCache.clear(); + } + } + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17526") + public void testRestartPrimary_NoReplicas() throws Exception { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME); + ensureYellow(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), primary); + + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + if (randomBoolean()) { + flush(INDEX_NAME); + } else { + refresh(INDEX_NAME); + } + FileCache fileCache = internalCluster().getInstance(Node.class, primary).fileCache(); + internalCluster().restartNode(primary); + ensureYellow(INDEX_NAME); + assertDocCounts(1, primary); + fileCache.prune(); + } + + public void testPrimaryStopped_ReplicaPromoted() throws Exception { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + refresh(INDEX_NAME); + + waitForSearchableDocs(1, primary, replica); + + // index another doc but don't refresh, we will ensure this is searchable once replica is promoted. + client().prepareIndex(INDEX_NAME).setId("2").setSource("bar", "baz").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + FileCache fileCache1 = internalCluster().getInstance(Node.class, primary).fileCache(); + // stop the primary node - we only have one shard on here. 
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final ShardRouting replicaShardRouting = getShardRoutingForNodeName(replica); + assertNotNull(replicaShardRouting); + assertTrue(replicaShardRouting + " should be promoted as a primary", replicaShardRouting.primary()); + // new primary should have at least the doc count from the first set of segments. + assertBusy(() -> { + final SearchResponse response = client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(); + assertTrue(response.getHits().getTotalHits().value() >= 1); + }, 1, TimeUnit.MINUTES); + + // assert we can index into the new primary. + client().prepareIndex(INDEX_NAME).setId("3").setSource("bar", "baz").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); + + // start another node, index another doc and replicate. + String nodeC = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + client().prepareIndex(INDEX_NAME).setId("4").setSource("baz", "baz").get(); + refresh(INDEX_NAME); + waitForSearchableDocs(4, nodeC, replica); + verifyStoreContent(); + fileCache1.prune(); + } + + public void testRestartPrimary() throws Exception { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), primary); + + final int initialDocCount = 1; + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + refresh(INDEX_NAME); + + FileCache fileCache = internalCluster().getInstance(Node.class, primary).fileCache(); + waitForSearchableDocs(initialDocCount, replica, primary); + internalCluster().restartNode(primary); + ensureGreen(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), replica); + + flushAndRefresh(INDEX_NAME); + waitForSearchableDocs(initialDocCount, replica, primary); + verifyStoreContent(); + fileCache.prune(); + } + + public void testCancelPrimaryAllocation() throws Exception { + // this test cancels allocation on the primary - promoting the new replica and recreating the former primary as a replica. 
+ final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + final int initialDocCount = 1; + + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + refresh(INDEX_NAME); + + waitForSearchableDocs(initialDocCount, replica, primary); + + final IndexShard indexShard = getIndexShard(primary, INDEX_NAME); + client().admin() + .cluster() + .prepareReroute() + .add(new CancelAllocationCommand(INDEX_NAME, indexShard.shardId().id(), primary, true)) + .execute() + .actionGet(); + ensureGreen(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), replica); + + flushAndRefresh(INDEX_NAME); + waitForSearchableDocs(initialDocCount, replica, primary); + verifyStoreContent(); + } + + public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { + final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + final Settings settings = Settings.builder() + .put(indexSettings()) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(new ArrayList<>(CODECS) { + { + add(CodecService.LUCENE_DEFAULT_CODEC); + } + })) + .build(); + createIndex(INDEX_NAME, settings); + ensureGreen(INDEX_NAME); + + final int initialDocCount = scaledRandomIntBetween(0, 10); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + waitForSearchableDocs(initialDocCount, nodeA, nodeB); + + final int additionalDocCount = scaledRandomIntBetween(0, 10); + final int expectedHitCount = initialDocCount + additionalDocCount; + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + + flushAndRefresh(INDEX_NAME); + waitForSearchableDocs(expectedHitCount, nodeA, nodeB); + + ensureGreen(INDEX_NAME); + verifyStoreContent(); + } + } + + public void testIndexReopenClose() throws Exception { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + + final int initialDocCount = scaledRandomIntBetween(1, 10); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + flush(INDEX_NAME); + waitForSearchableDocs(initialDocCount, primary, replica); + } + logger.info("--> Closing the index "); + client().admin().indices().prepareClose(INDEX_NAME).get(); + + logger.info("--> Opening the index"); + client().admin().indices().prepareOpen(INDEX_NAME).get(); + ensureGreen(INDEX_NAME); + waitForSearchableDocs(initialDocCount, primary, replica); + verifyStoreContent(); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17526") + public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { + final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME, 
Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); + ensureGreen(INDEX_NAME); + + // Index a doc to create the first set of segments. _s1.si + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").get(); + // Flush segments to disk and create a new commit point (Primary: segments_3, _s1.si) + flushAndRefresh(INDEX_NAME); + assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 1); + + // Index to create another segment + client().prepareIndex(INDEX_NAME).setId("2").setSource("foo", "bar").get(); + + // Force a merge here so that the in memory SegmentInfos does not reference old segments on disk. + client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(false).get(); + refresh(INDEX_NAME); + + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); + assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); + + client().prepareIndex(INDEX_NAME).setId("3").setSource("foo", "bar").get(); + refresh(INDEX_NAME); + waitForSearchableDocs(3, primaryNode, replicaNode); + assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); + assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); + if (!warmIndexSegmentReplicationEnabled()) { + verifyStoreContent(); + } + } + + /** + * This tests that the max seqNo we send to replicas is accurate and that after failover + * the new primary starts indexing from the correct maxSeqNo and replays the correct count of docs + * from xlog. + */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17527") + public void testReplicationPostDeleteAndForceMerge() throws Exception { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + final int initialDocCount = scaledRandomIntBetween(1, 10); + for (int i = 0; i < initialDocCount; i++) { + client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); + } + refresh(INDEX_NAME); + waitForSearchableDocs(initialDocCount, primary, replica); + + final int deletedDocCount = randomIntBetween(1, initialDocCount); + for (int i = 0; i < deletedDocCount; i++) { + client(primary).prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + } + client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(false).get(); + + // randomly flush here after the force merge to wipe any old segments. 
+ if (randomBoolean()) { + flush(INDEX_NAME); + } + + final IndexShard primaryShard = getIndexShard(primary, INDEX_NAME); + final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); + assertBusy( + () -> assertEquals( + primaryShard.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replicaShard.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ) + ); + + // add some docs to the xlog and drop primary. + final int additionalDocs = randomIntBetween(1, 5); + for (int i = initialDocCount; i < initialDocCount + additionalDocs; i++) { + client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); + } + // Drop the primary and wait until replica is promoted. + FileCache fileCache1 = internalCluster().getInstance(Node.class, primary).fileCache(); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + + final ShardRouting replicaShardRouting = getShardRoutingForNodeName(replica); + assertNotNull(replicaShardRouting); + assertTrue(replicaShardRouting + " should be promoted as a primary", replicaShardRouting.primary()); + refresh(INDEX_NAME); + final long expectedHitCount = initialDocCount + additionalDocs - deletedDocCount; + // waitForSearchableDocs(initialDocCount, replica, primary); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + + int expectedMaxSeqNo = initialDocCount + deletedDocCount + additionalDocs - 1; + assertEquals(expectedMaxSeqNo, replicaShard.seqNoStats().getMaxSeqNo()); + + // index another doc. + client().prepareIndex(INDEX_NAME).setId(String.valueOf(expectedMaxSeqNo + 1)).setSource("another", "doc").get(); + refresh(INDEX_NAME); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount + 1); + fileCache1.clear(); + } + + public void testScrollWithConcurrentIndexAndSearch() throws Exception { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + final List> pendingIndexResponses = new ArrayList<>(); + final List> pendingSearchResponse = new ArrayList<>(); + final int searchCount = randomIntBetween(1, 2); + final WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values()); + + for (int i = 0; i < searchCount; i++) { + pendingIndexResponses.add( + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(i)) + .setRefreshPolicy(refreshPolicy) + .setSource("field", "value" + i) + .execute() + ); + flush(INDEX_NAME); + forceMerge(); + } + + final SearchResponse searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setIndices(INDEX_NAME) + .setRequestCache(false) + .setScroll(TimeValue.timeValueDays(1)) + .setSize(10) + .get(); + + for (int i = searchCount; i < searchCount * 2; i++) { + pendingIndexResponses.add( + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(i)) + .setRefreshPolicy(refreshPolicy) + .setSource("field", "value" + i) + .execute() + ); + } + flush(INDEX_NAME); + forceMerge(); + client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); + + assertBusy(() -> { + client().admin().indices().prepareRefresh().execute().actionGet(); + assertTrue(pendingIndexResponses.stream().allMatch(ActionFuture::isDone)); + 
assertTrue(pendingSearchResponse.stream().allMatch(ActionFuture::isDone)); + }, 1, TimeUnit.MINUTES); + verifyStoreContent(); + waitForSearchableDocs(INDEX_NAME, 2 * searchCount, List.of(primary, replica)); + } + + public void testMultipleShards() throws Exception { + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME, indexSettings); + ensureGreen(INDEX_NAME); + + final int initialDocCount = scaledRandomIntBetween(1, 10); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + waitForSearchableDocs(initialDocCount, nodeA, nodeB); + + final int additionalDocCount = scaledRandomIntBetween(0, 10); + final int expectedHitCount = initialDocCount + additionalDocCount; + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + + flushAndRefresh(INDEX_NAME); + waitForSearchableDocs(expectedHitCount, nodeA, nodeB); + + ensureGreen(INDEX_NAME); + if (!warmIndexSegmentReplicationEnabled()) { + verifyStoreContent(); + } + } + } + + public void testReplicationAfterForceMerge() throws Exception { + performReplicationAfterForceMerge(false, SHARD_COUNT * (1 + REPLICA_COUNT)); + } + + public void testReplicationAfterForceMergeOnPrimaryShardsOnly() throws Exception { + performReplicationAfterForceMerge(true, SHARD_COUNT); + } + + private void performReplicationAfterForceMerge(boolean primaryOnly, int expectedSuccessfulShards) throws Exception { + final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + + final int initialDocCount = scaledRandomIntBetween(0, 10); + final int additionalDocCount = scaledRandomIntBetween(0, 10); + final int expectedHitCount = initialDocCount + additionalDocCount; + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + + flush(INDEX_NAME); + waitForSearchableDocs(initialDocCount, nodeA, nodeB); + + // Index a second set of docs so we can merge into one segment. + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + waitForSearchableDocs(expectedHitCount, nodeA, nodeB); + + // Perform force merge only on the primary shards. 
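+            // Note: when primaryOnly is false the merge also runs on replica shards, which is why the two callers pass different expected successful-shard counts.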
+ final ForceMergeResponse forceMergeResponse = client().admin() + .indices() + .prepareForceMerge(INDEX_NAME) + .setPrimaryOnly(primaryOnly) + .setMaxNumSegments(1) + .setFlush(false) + .get(); + assertThat(forceMergeResponse.getFailedShards(), is(0)); + assertThat(forceMergeResponse.getSuccessfulShards(), is(expectedSuccessfulShards)); + refresh(INDEX_NAME); + if (!warmIndexSegmentReplicationEnabled()) { + verifyStoreContent(); + } + } + } + + /** + * This test verifies that segment replication does not fail for closed indices + */ + public void testClosedIndices() { + List nodes = new ArrayList<>(); + // start 1st node so that it contains the primary + nodes.add(internalCluster().startDataAndSearchNodes(1).get(0)); + createIndex(INDEX_NAME, super.indexSettings()); + ensureYellowAndNoInitializingShards(INDEX_NAME); + // start 2nd node so that it contains the replica + nodes.add(internalCluster().startDataAndSearchNodes(1).get(0)); + ensureGreen(INDEX_NAME); + + logger.info("--> Close index"); + assertAcked(client().admin().indices().prepareClose(INDEX_NAME)); + + logger.info("--> waiting for allocation to have shards assigned"); + waitForRelocation(ClusterHealthStatus.GREEN); + } + + /** + * This test validates the primary node drop does not result in shard failure on replica. + * @throws Exception when issue is encountered + */ + public void testNodeDropWithOngoingReplication() throws Exception { + final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); + // Get replica allocation id + final String replicaAllocationId = state.routingTable() + .index(INDEX_NAME) + .shardsWithState(ShardRoutingState.STARTED) + .stream() + .filter(routing -> routing.primary() == false) + .findFirst() + .get() + .allocationId() + .getId(); + DiscoveryNode primaryDiscovery = state.nodes().resolveNode(primaryNode); + + CountDownLatch blockFileCopy = new CountDownLatch(1); + MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + )); + primaryTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode), + (connection, requestId, action, request, options) -> { + if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK)) { + fail("File copy should not happen for warm index replica shards"); + } + connection.sendRequest(requestId, action, request, options); + } + ); + final int docCount = scaledRandomIntBetween(1, 10); + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + // Refresh, this should trigger round of segment replication + refresh(INDEX_NAME); + blockFileCopy.countDown(); + FileCache fileCache = internalCluster().getInstance(Node.class, primaryNode).fileCache(); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellow(INDEX_NAME); + assertBusy(() -> { assertDocCounts(docCount, replicaNode); }); + state = 
client().admin().cluster().prepareState().execute().actionGet().getState(); + // replica now promoted as primary should have same allocation id + final String currentAllocationID = state.routingTable() + .index(INDEX_NAME) + .shardsWithState(ShardRoutingState.STARTED) + .stream() + .filter(routing -> routing.primary()) + .findFirst() + .get() + .allocationId() + .getId(); + assertEquals(currentAllocationID, replicaAllocationId); + fileCache.prune(); + } + + public void testCancellation() throws Exception { + final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); + ensureYellow(INDEX_NAME); + + final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + + final SegmentReplicationSourceService segmentReplicationSourceService = internalCluster().getInstance( + SegmentReplicationSourceService.class, + primaryNode + ); + final IndexShard primaryShard = getIndexShard(primaryNode, INDEX_NAME); + + CountDownLatch latch = new CountDownLatch(1); + + MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + )); + mockTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode), + (connection, requestId, action, request, options) -> { + if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK)) { + fail("File copy should not happen for warm index replica shards"); + } + connection.sendRequest(requestId, action, request, options); + } + ); + + final int docCount = scaledRandomIntBetween(0, 10); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(docCount); + waitForDocs(docCount, indexer); + + flush(INDEX_NAME); + } + segmentReplicationSourceService.beforeIndexShardClosed(primaryShard.shardId(), primaryShard, indexSettings()); + latch.countDown(); + assertDocCounts(docCount, primaryNode); + } + + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") + public void testDeleteOperations() throws Exception { + final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + final int initialDocCount = scaledRandomIntBetween(1, 5); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + waitForSearchableDocs(initialDocCount, nodeA, nodeB); + + final int additionalDocCount = scaledRandomIntBetween(0, 2); + final int expectedHitCount = initialDocCount + additionalDocCount; + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + waitForSearchableDocs(expectedHitCount, nodeA, nodeB); + + ensureGreen(INDEX_NAME); + + Set ids = indexer.getIds(); + assertFalse(ids.isEmpty()); + String id = ids.toArray()[0].toString(); + client(nodeA).prepareDelete(INDEX_NAME, id).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + refresh(INDEX_NAME); + waitForSearchableDocs(expectedHitCount - 1, nodeA, nodeB); + 
verifyStoreContent(); + } + } + + public void testUpdateOperations() throws Exception { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME); + ensureYellow(INDEX_NAME); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + final int initialDocCount = scaledRandomIntBetween(1, 5); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + waitForSearchableDocs(initialDocCount, asList(primary, replica)); + + final int additionalDocCount = scaledRandomIntBetween(0, 5); + final int expectedHitCount = initialDocCount + additionalDocCount; + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + waitForSearchableDocs(expectedHitCount, asList(primary, replica)); + + Set ids = indexer.getIds(); + String id = ids.toArray()[0].toString(); + UpdateResponse updateResponse = client(primary).prepareUpdate(INDEX_NAME, id) + .setDoc(Requests.INDEX_CONTENT_TYPE, "foo", "baz") + .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) + .get(); + assertFalse("request shouldn't have forced a refresh", updateResponse.forcedRefresh()); + assertEquals(2, updateResponse.getVersion()); + + refresh(INDEX_NAME); + verifyStoreContent(); + assertSearchHits(client(primary).prepareSearch(INDEX_NAME).setQuery(matchQuery("foo", "baz")).get(), id); + assertSearchHits(client(replica).prepareSearch(INDEX_NAME).setQuery(matchQuery("foo", "baz")).get(), id); + } + } + + public void testDropPrimaryDuringReplication() throws Exception { + final int replica_count = 6; + final Settings settings = Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replica_count) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME, settings); + final List dataNodes = internalCluster().startDataAndSearchNodes(6); + ensureGreen(INDEX_NAME); + + int initialDocCount = scaledRandomIntBetween(5, 10); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + // don't wait for replication to complete, stop the primary immediately. + FileCache fileCache = internalCluster().getInstance(Node.class, primaryNode).fileCache(); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellow(INDEX_NAME); + + // start another replica. + dataNodes.add(internalCluster().startDataAndSearchNodes(1).get(0)); + ensureGreen(INDEX_NAME); + waitForSearchableDocs(initialDocCount, dataNodes); + + // index another doc and refresh - without this the new replica won't catch up. 
+ String docId = String.valueOf(initialDocCount + 1); + client().prepareIndex(INDEX_NAME).setId(docId).setSource("foo", "bar").get(); + + flushAndRefresh(INDEX_NAME); + waitForSearchableDocs(initialDocCount + 1, dataNodes); + verifyStoreContent(); + fileCache.prune(); + } + } + + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") + public void testReplicaHasDiffFilesThanPrimary() throws Exception { + final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME); + IndexWriterConfig iwc = newIndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.APPEND); + + // create a doc to index + int numDocs = 2 + random().nextInt(10); + + List docs = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add( + new TextField( + "body", + TestUtil.randomRealisticUnicodeString(random()), + random().nextBoolean() ? Field.Store.YES : Field.Store.NO + ) + ); + doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); + docs.add(doc); + } + // create some segments on the replica before copy. + try (IndexWriter writer = new IndexWriter(replicaShard.store().directory(), iwc)) { + for (Document d : docs) { + writer.addDocument(d); + } + writer.flush(); + writer.commit(); + } + + final SegmentInfos segmentInfos = SegmentInfos.readLatestCommit(replicaShard.store().directory()); + replicaShard.finalizeReplication(segmentInfos); + ensureYellow(INDEX_NAME); + + final int docCount = scaledRandomIntBetween(10, 20); + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + // Refresh, this should trigger round of segment replication + refresh(INDEX_NAME); + } + ensureGreen(INDEX_NAME); + waitForSearchableDocs(docCount, primaryNode, replicaNode); + // ToDo: verifyStoreContent() needs to be fixed for warm indices + if (!warmIndexSegmentReplicationEnabled()) { + verifyStoreContent(); + } + final IndexShard replicaAfterFailure = getIndexShard(replicaNode, INDEX_NAME); + assertNotEquals(replicaAfterFailure.routingEntry().allocationId().getId(), replicaShard.routingEntry().allocationId().getId()); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17527") + public void testPressureServiceStats() throws Exception { + final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + createIndex(INDEX_NAME); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + int initialDocCount = scaledRandomIntBetween(10, 20); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + + // get shard references. 
+ final IndexShard primaryShard = getIndexShard(primaryNode, INDEX_NAME); + final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME); + logger.info("Replica aid {}", replicaShard.routingEntry().allocationId()); + logger.info("former primary aid {}", primaryShard.routingEntry().allocationId()); + + // fetch pressure stats from the Primary's Node. + SegmentReplicationPressureService pressureService = internalCluster().getInstance( + SegmentReplicationPressureService.class, + primaryNode + ); + + // Fetch pressure stats from the Replica's Node we will assert replica node returns nothing until it is promoted. + SegmentReplicationPressureService replicaNode_service = internalCluster().getInstance( + SegmentReplicationPressureService.class, + replicaNode + ); + + final Map shardStats = pressureService.nodeStats().getShardStats(); + assertEquals("We should have stats returned for the replication group", 1, shardStats.size()); + + SegmentReplicationPerGroupStats groupStats = shardStats.get(primaryShard.shardId()); + Set replicaStats = groupStats.getReplicaStats(); + assertAllocationIdsInReplicaShardStats(Set.of(replicaShard.routingEntry().allocationId().getId()), replicaStats); + + assertTrue(replicaNode_service.nodeStats().getShardStats().isEmpty()); + + // drop the primary, this won't hand off pressure stats between old/new primary. + FileCache fileCache = internalCluster().getInstance(Node.class, primaryNode).fileCache(); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + + assertTrue("replica should be promoted as a primary", replicaShard.routingEntry().primary()); + assertEquals( + "We should have stats returned for the replication group", + 1, + replicaNode_service.nodeStats().getShardStats().size() + ); + // after the primary is dropped and replica is promoted we won't have a replica assigned yet, so stats per replica should return + // empty. + replicaStats = replicaNode_service.nodeStats().getShardStats().get(primaryShard.shardId()).getReplicaStats(); + assertTrue(replicaStats.isEmpty()); + + // start another replica. 
+ String replicaNode_2 = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + final IndexShard secondReplicaShard = getIndexShard(replicaNode_2, INDEX_NAME); + final String second_replica_aid = secondReplicaShard.routingEntry().allocationId().getId(); + waitForSearchableDocs(initialDocCount, replicaNode_2); + + assertEquals( + "We should have stats returned for the replication group", + 1, + replicaNode_service.nodeStats().getShardStats().size() + ); + replicaStats = replicaNode_service.nodeStats().getShardStats().get(replicaShard.shardId()).getReplicaStats(); + assertAllocationIdsInReplicaShardStats(Set.of(second_replica_aid), replicaStats); + final SegmentReplicationShardStats replica_entry = replicaStats.stream().findFirst().get(); + assertEquals(replica_entry.getCheckpointsBehindCount(), 0); + + // test a checkpoint without any new segments + flush(INDEX_NAME); + assertBusy(() -> { + assertEquals(1, replicaNode_service.nodeStats().getShardStats().size()); + final Set shardStatsSet = replicaNode_service.nodeStats() + .getShardStats() + .get(replicaShard.shardId()) + .getReplicaStats(); + assertAllocationIdsInReplicaShardStats(Set.of(second_replica_aid), shardStatsSet); + final SegmentReplicationShardStats stats = shardStatsSet.stream().findFirst().get(); + assertEquals(0, stats.getCheckpointsBehindCount()); + }); + fileCache.prune(); + } + } + + private void assertAllocationIdsInReplicaShardStats(Set expected, Set replicaStats) { + assertEquals(expected, replicaStats.stream().map(SegmentReplicationShardStats::getAllocationId).collect(Collectors.toSet())); + } + + /** + * Tests a scroll query on the replica + * @throws Exception when issue is encountered + */ + public void testScrollCreatedOnReplica() throws Exception { + // create the cluster with one primary node containing primary shard and replica node containing replica shard + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + prepareCreate( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + // we want to control refreshes + .put("index.refresh_interval", -1) + ).get(); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(0)) + .setSource(jsonBuilder().startObject().field("field", 0).endObject()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + refresh(INDEX_NAME); + + assertBusy( + () -> assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ) + ); + + // opens a scrolled query before a flush is called. 
+ // this is for testing scroll segment consistency between refresh and flush + SearchResponse searchResponse = client(replica).prepareSearch() + .setQuery(matchAllQuery()) + .setIndices(INDEX_NAME) + .setRequestCache(false) + .setPreference("_only_local") + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .addSort("field", SortOrder.ASC) + .setSize(10) + .setScroll(TimeValue.timeValueDays(1)) + .get(); + + final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); + SegmentInfos latestSegmentInfos = getLatestSegmentInfos(replicaShard); + final Set snapshottedSegments = new HashSet<>(latestSegmentInfos.files(false)); + logger.info("Segments {}", snapshottedSegments); + + // index more docs and force merge down to 1 segment + for (int i = 1; i < 5; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + refresh(INDEX_NAME); + } + // create new on-disk segments and copy them out. + assertBusy(() -> { + assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + + // force merge and flush. + client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); + // wait for replication to complete + assertBusy(() -> { + assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + logger.info("Local segments after force merge and commit {}", getLatestSegmentInfos(replicaShard).files(false)); + + // Test stats + logger.info("--> Collect all scroll query hits"); + long scrollHits = 0; + do { + scrollHits += searchResponse.getHits().getHits().length; + searchResponse = client(replica).prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueDays(1)).get(); + assertAllSuccessful(searchResponse); + } while (searchResponse.getHits().getHits().length > 0); + assertEquals(1, scrollHits); + + client(replica).prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); + final Set filesAfterClearScroll = Arrays.stream(replicaShard.store().directory().listAll()).collect(Collectors.toSet()); + // there should be no active readers, snapshots, or on-disk commits containing the snapshotted files, check that they have been + // deleted. + Set latestCommitSegments = new HashSet<>(replicaShard.store().readLastCommittedSegmentsInfo().files(false)); + assertEquals( + "Snapshotted files are no longer part of the latest commit", + Collections.emptySet(), + Sets.intersection(latestCommitSegments, snapshottedSegments) + ); + assertEquals( + "All snapshotted files should be deleted", + Collections.emptySet(), + Sets.intersection(filesAfterClearScroll, snapshottedSegments) + ); + } + + /** + * This tests that if a primary receives docs while a replica is performing round of segrep during recovery + * the replica will catch up to latest checkpoint once recovery completes without requiring an additional primary refresh/flush. 
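+     * The test blocks segment replication to the replica, indexes a second document while the replica is still recovering, then releases the block and verifies both documents become searchable on all nodes.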
+ */ + public void testPrimaryReceivesDocsDuringReplicaRecovery() throws Exception { + final List nodes = new ArrayList<>(); + final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + nodes.add(primaryNode); + final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); + createIndex(INDEX_NAME, settings); + ensureGreen(INDEX_NAME); + // start a replica node, initially will be empty with no shard assignment. + final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + nodes.add(replicaNode); + + // index a doc. + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", randomInt()).get(); + refresh(INDEX_NAME); + + CountDownLatch latch = new CountDownLatch(1); + // block replication + try (final Releasable ignored = blockReplication(List.of(replicaNode), latch)) { + // update to add replica, initiating recovery, this will get stuck at last step + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + ensureYellow(INDEX_NAME); + // index another doc while blocked, this would not get replicated to replica. + client().prepareIndex(INDEX_NAME).setId("2").setSource("foo2", randomInt()).get(); + refresh(INDEX_NAME); + } + ensureGreen(INDEX_NAME); + waitForSearchableDocs(2, nodes); + } + + public void testIndexWhileRecoveringReplica() throws Exception { + final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + assertAcked( + prepareCreate(INDEX_NAME).setMapping( + jsonBuilder().startObject() + .startObject("_routing") + .field("required", true) + .endObject() + .startObject("properties") + .startObject("online") + .field("type", "boolean") + .endObject() + .startObject("ts") + .field("type", "date") + .field("ignore_malformed", false) + .field("format", "epoch_millis") + .endObject() + .startObject("bs") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + ) + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + + client().prepareIndex(INDEX_NAME) + .setId("1") + .setRouting("Y") + .setSource("online", false, "bs", "Y", "ts", System.currentTimeMillis() - 100, "type", "s") + .get(); + client().prepareIndex(INDEX_NAME) + .setId("2") + .setRouting("X") + .setSource("online", true, "bs", "X", "ts", System.currentTimeMillis() - 10000000, "type", "s") + .get(); + client().prepareIndex(INDEX_NAME) + .setId("3") + .setRouting(randomAlphaOfLength(2)) + .setSource("online", false, "ts", System.currentTimeMillis() - 100, "type", "bs") + .get(); + client().prepareIndex(INDEX_NAME) + .setId("4") + .setRouting(randomAlphaOfLength(2)) + .setSource("online", true, "ts", System.currentTimeMillis() - 123123, "type", "bs") + .get(); + refresh(); + ensureGreen(INDEX_NAME); + waitForSearchableDocs(4, primaryNode, replicaNode); + + SearchResponse response = client().prepareSearch(INDEX_NAME) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setQuery( + boolQuery().must(termQuery("online", true)) + .must( + boolQuery().should( + boolQuery().must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000))).must(termQuery("type", "bs")) + ) + .should( + boolQuery().must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000))).must(termQuery("type", "s")) + ) + ) + ) + .setVersion(true) + .setFrom(0) + .setSize(100) + .setExplain(true) + .get(); + 
assertNoFailures(response); + } + + /** + * Tests whether segment replication supports realtime get requests and reads and parses source from the translog to serve strong reads. + */ + public void testRealtimeGetRequestsSuccessful() { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime get + assertAcked( + prepareCreate(INDEX_NAME).setSettings(Settings.builder().put("index.refresh_interval", -1).put(indexSettings())) + .addAlias(new Alias("alias")) + ); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + final String id = routingKeyForShard(INDEX_NAME, 0); + + GetResponse response = client(replica).prepareGet(indexOrAlias(), "1").get(); + assertFalse(response.isExists()); + + // index doc 1 + client().prepareIndex(indexOrAlias()).setId("1").setSource("foo", "bar").get(); + + // non realtime get 1 + response = client().prepareGet(indexOrAlias(), "1").setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime get 1 + response = client(replica).prepareGet(indexOrAlias(), "1").get(); + assertTrue(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat(response.getSourceAsMap().get("foo").toString(), equalTo("bar")); + + // index doc 2 + client().prepareIndex(indexOrAlias()).setId("2").setSource("foo2", "bar2").setRouting(id).get(); + + // realtime get 2 (with routing) + response = client(replica).prepareGet(indexOrAlias(), "2").setRouting(id).get(); + assertTrue(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat(response.getSourceAsMap().get("foo2").toString(), equalTo("bar2")); + } + + public void testRealtimeGetRequestsUnsuccessful() { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + assertAcked( + prepareCreate(INDEX_NAME).setSettings( + Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + ).addAlias(new Alias("alias")) + ); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + final String id = routingKeyForShard(INDEX_NAME, 0); + final String routingOtherShard = routingKeyForShard(INDEX_NAME, 1); + + // index doc 1 + client().prepareIndex(indexOrAlias()).setId("1").setSource("foo", "bar").setRouting(id).get(); + + // non realtime get 1 + GetResponse response = client().prepareGet(indexOrAlias(), "1").setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime get 1 (preference = _replica) + response = client(replica).prepareGet(indexOrAlias(), "1").setPreference(Preference.REPLICA.type()).get(); + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + + // realtime get 1 (with routing set) + response = client(replica).prepareGet(INDEX_NAME, "1").setRouting(routingOtherShard).get(); + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + } + + /** + * Tests whether segment replication supports realtime MultiGet requests and reads and parses source from the translog to serve strong reads. 
+ */ + public void testRealtimeMultiGetRequestsSuccessful() { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime multi get + assertAcked( + prepareCreate(INDEX_NAME).setSettings( + Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + ).addAlias(new Alias("alias")) + ); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + final String id = routingKeyForShard(INDEX_NAME, 0); + + // index doc 1 + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").get(); + + // index doc 2 + client().prepareIndex(INDEX_NAME).setId("2").setSource("foo2", "bar2").setRouting(id).get(); + + // multi get non realtime 1 + MultiGetResponse mgetResponse = client().prepareMultiGet() + .add(new MultiGetRequest.Item(INDEX_NAME, "1")) + .add(new MultiGetRequest.Item("nonExistingIndex", "1")) + .setRealtime(false) + .get(); + assertThat(mgetResponse.getResponses().length, is(2)); + + assertThat(mgetResponse.getResponses()[0].getIndex(), is(INDEX_NAME)); + assertFalse(mgetResponse.getResponses()[0].isFailed()); + assertFalse(mgetResponse.getResponses()[0].getResponse().isExists()); + + // multi get realtime 1 + mgetResponse = client(replica).prepareMultiGet() + .add(new MultiGetRequest.Item(INDEX_NAME, "1")) + .add(new MultiGetRequest.Item(INDEX_NAME, "2").routing(id)) + .add(new MultiGetRequest.Item("nonExistingIndex", "1")) + .get(); + + assertThat(mgetResponse.getResponses().length, is(3)); + assertThat(mgetResponse.getResponses()[0].getIndex(), is(INDEX_NAME)); + assertFalse(mgetResponse.getResponses()[0].isFailed()); + assertThat(mgetResponse.getResponses()[0].getResponse().getSourceAsMap().get("foo").toString(), equalTo("bar")); + + assertThat(mgetResponse.getResponses()[1].getIndex(), is(INDEX_NAME)); + assertFalse(mgetResponse.getResponses()[1].isFailed()); + assertThat(mgetResponse.getResponses()[1].getResponse().getSourceAsMap().get("foo2").toString(), equalTo("bar2")); + + assertThat(mgetResponse.getResponses()[2].getIndex(), is("nonExistingIndex")); + assertTrue(mgetResponse.getResponses()[2].isFailed()); + assertThat(mgetResponse.getResponses()[2].getFailure().getMessage(), is("no such index [nonExistingIndex]")); + } + + public void testRealtimeMultiGetRequestsUnsuccessful() { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + assertAcked( + prepareCreate(INDEX_NAME).setSettings( + Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + ).addAlias(new Alias("alias")) + ); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + + final String id = routingKeyForShard(INDEX_NAME, 0); + final String routingOtherShard = routingKeyForShard(INDEX_NAME, 1); + + // index doc 1 + client().prepareIndex(indexOrAlias()).setId("1").setSource("foo", "bar").setRouting(id).get(); + + // realtime multi get 1 (preference = _replica) + MultiGetResponse mgetResponse = client(replica).prepareMultiGet() + .add(new MultiGetRequest.Item(INDEX_NAME, "1")) + .setPreference(Preference.REPLICA.type()) + .add(new MultiGetRequest.Item("nonExistingIndex", "1")) + .get(); + assertThat(mgetResponse.getResponses().length, is(2)); + assertThat(mgetResponse.getResponses()[0].getIndex(), is(INDEX_NAME)); + 
assertFalse(mgetResponse.getResponses()[0].getResponse().isExists()); + + assertThat(mgetResponse.getResponses()[1].getIndex(), is("nonExistingIndex")); + assertTrue(mgetResponse.getResponses()[1].isFailed()); + + // realtime multi get 1 (routing set) + mgetResponse = client(replica).prepareMultiGet() + .add(new MultiGetRequest.Item(INDEX_NAME, "1").routing(routingOtherShard)) + .add(new MultiGetRequest.Item("nonExistingIndex", "1")) + .get(); + assertThat(mgetResponse.getResponses().length, is(2)); + assertThat(mgetResponse.getResponses()[0].getIndex(), is(INDEX_NAME)); + // expecting failure since we explicitly route request to a shard on which it doesn't exist + assertFalse(mgetResponse.getResponses()[0].getResponse().isExists()); + assertThat(mgetResponse.getResponses()[1].getIndex(), is("nonExistingIndex")); + assertTrue(mgetResponse.getResponses()[1].isFailed()); + + } + + /** + * Tests whether segment replication supports realtime termvector requests and reads and parses source from the translog to serve strong reads. + */ + public void testRealtimeTermVectorRequestsSuccessful() throws IOException { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .field("term_vector", "with_positions_offsets_payloads") + .field("analyzer", "tv_test") + .endObject() + .endObject() + .endObject(); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime termvectors + assertAcked( + prepareCreate(INDEX_NAME).setMapping(mapping) + .addAlias(new Alias("alias")) + .setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.tv_test.tokenizer", "standard") + .put("index.refresh_interval", -1) + .putList("index.analysis.analyzer.tv_test.filter", "lowercase") + ) + ); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + final String id = routingKeyForShard(INDEX_NAME, 0); + + TermVectorsResponse response = client(replica).prepareTermVectors(indexOrAlias(), "1").get(); + assertFalse(response.isExists()); + + // index doc 1 + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(1)) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .execute() + .actionGet(); + + // non realtime termvectors 1 + response = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)).setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime termvectors 1 + TermVectorsRequestBuilder resp = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setRealtime(true) + .setSelectedFields(); + response = resp.execute().actionGet(); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat("doc id: " + 1 + " doesn't exists but should", response.isExists(), equalTo(true)); + Fields fields = response.getFields(); + assertThat(fields.size(), equalTo(1)); + + // index doc 2 with routing + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(2)) + .setRouting(id) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .execute() + .actionGet(); + + // realtime termvectors 2 with routing + resp = client().prepareTermVectors(indexOrAlias(), Integer.toString(2)) + .setPayloads(true) + 
.setOffsets(true) + .setPositions(true) + .setRouting(id) + .setSelectedFields(); + response = resp.execute().actionGet(); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat("doc id: " + 1 + " doesn't exists but should", response.isExists(), equalTo(true)); + fields = response.getFields(); + assertThat(fields.size(), equalTo(1)); + + } + + public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { + final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .field("term_vector", "with_positions_offsets_payloads") + .field("analyzer", "tv_test") + .endObject() + .endObject() + .endObject(); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime termvectors + assertAcked( + prepareCreate(INDEX_NAME).setMapping(mapping) + .addAlias(new Alias("alias")) + .setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.tv_test.tokenizer", "standard") + .put("index.refresh_interval", -1) + .putList("index.analysis.analyzer.tv_test.filter", "lowercase") + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + ) + ); + final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + ensureGreen(INDEX_NAME); + final String id = routingKeyForShard(INDEX_NAME, 0); + final String routingOtherShard = routingKeyForShard(INDEX_NAME, 1); + + // index doc 1 + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(1)) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .setRouting(id) + .execute() + .actionGet(); + + // non realtime termvectors 1 + TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)).setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime termvectors (preference = _replica) + TermVectorsRequestBuilder resp = client(replica).prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setPreference(Preference.REPLICA.type()) + .setRealtime(true) + .setSelectedFields(); + response = resp.execute().actionGet(); + + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + + // realtime termvectors (with routing set) + resp = client(replica).prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setRouting(routingOtherShard) + .setSelectedFields(); + response = resp.execute().actionGet(); + + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + + } + + public void testReplicaAlreadyAtCheckpoint() throws Exception { + final List nodes = new ArrayList<>(); + final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + nodes.add(primaryNode); + final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); + createIndex(INDEX_NAME, settings); + ensureGreen(INDEX_NAME); + // start a replica node, initially will be empty with no shard assignment. 
+ final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + nodes.add(replicaNode); + final String replicaNode2 = internalCluster().startDataAndSearchNodes(1).get(0); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)) + ); + ensureGreen(INDEX_NAME); + + // index a doc. + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", randomInt()).get(); + refresh(INDEX_NAME); + waitForSearchableDocs(1, primaryNode, replicaNode, replicaNode2); + + FileCache fileCache = internalCluster().getInstance(Node.class, primaryNode).fileCache(); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + IndexShard replica_1 = getIndexShard(replicaNode, INDEX_NAME); + IndexShard replica_2 = getIndexShard(replicaNode2, INDEX_NAME); + // wait until a replica is promoted & finishes engine flip, we don't care which one + AtomicReference primary = new AtomicReference<>(); + assertBusy(() -> { + assertTrue("replica should be promoted as a primary", replica_1.routingEntry().primary() || replica_2.routingEntry().primary()); + primary.set(replica_1.routingEntry().primary() ? replica_1 : replica_2); + }); + + FlushRequest request = new FlushRequest(INDEX_NAME); + request.force(true); + primary.get().flush(request); + + assertBusy(() -> { + assertEquals( + replica_1.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica_2.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + + assertBusy(() -> { + ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); + ReplicationStats replicationStats = clusterStatsResponse.getIndicesStats().getSegments().getReplicationStats(); + assertEquals(0L, replicationStats.maxBytesBehind); + assertEquals(0L, replicationStats.maxReplicationLag); + assertEquals(0L, replicationStats.totalBytesBehind); + }); + fileCache.prune(); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index 23864c35ad154..8e8f008158b9c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -16,8 +16,6 @@ import java.nio.file.Path; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; - /** * This class runs Segment Replication Integ test suite with remote store enabled. 
*/ diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java index e673c1409a869..6bcbf303adb94 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -242,7 +242,8 @@ private IndexMetadata.Builder updateInSyncAllocations( allocationId = RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID; } else { assert (recoverySource instanceof RecoverySource.SnapshotRecoverySource - || recoverySource instanceof RecoverySource.RemoteStoreRecoverySource) : recoverySource; + || recoverySource instanceof RecoverySource.RemoteStoreRecoverySource + || recoverySource instanceof RecoverySource.ExistingStoreRecoverySource) : recoverySource; allocationId = updates.initializedPrimary.allocationId().getId(); } // forcing a stale primary resets the in-sync allocations to the singleton set with the stale id diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java index 7999faece52ca..e0543b396728e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java @@ -33,6 +33,8 @@ import java.util.Queue; import java.util.Set; +import static org.opensearch.action.admin.indices.tiering.TieringUtils.isPartialIndex; + /** * A {@link RemoteShardsBalancer} used by the {@link BalancedShardsAllocator} to perform allocation operations * for remote shards within the cluster. @@ -345,7 +347,8 @@ private void unassignIgnoredRemoteShards(RoutingAllocation routingAllocation) { // Remote shards do not have an existing store to recover from and can be recovered from an empty source // to re-fetch any shard blocks from the repository. if (shard.primary()) { - if (RecoverySource.Type.SNAPSHOT.equals(shard.recoverySource().getType()) == false) { + if (RecoverySource.Type.SNAPSHOT.equals(shard.recoverySource().getType()) == false + && isPartialIndex(allocation.metadata().getIndexSafe(shard.index())) == false) { unassignedShard = shard.updateUnassigned(shard.unassignedInfo(), RecoverySource.EmptyStoreRecoverySource.INSTANCE); } } diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index d759423ce5a55..7f3010ff0937a 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -368,6 +368,11 @@ public boolean shouldPeriodicallyFlush() { @Override public void flush(boolean force, boolean waitIfOngoing) throws EngineException { ensureOpen(); + // Skip flushing for indices with partial locality (warm indices) + // For these indices, we don't need to commit as we will sync from the remote store on re-open + if (engineConfig.getIndexSettings().isStoreLocalityPartial()) { + return; + } // readLock is held here to wait/block any concurrent close that acquires the writeLock. 
try (final ReleasableLock lock = readLock.acquire()) { ensureOpen(); @@ -442,7 +447,9 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { latestSegmentInfos.changed(); } try { - commitSegmentInfos(latestSegmentInfos); + if (engineConfig.getIndexSettings().isStoreLocalityPartial() == false) { + commitSegmentInfos(latestSegmentInfos); + } } catch (IOException e) { // mark the store corrupted unless we are closing as result of engine failure. // in this case Engine#failShard will handle store corruption. diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index bd47a664b729d..173268997895e 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -5142,7 +5142,9 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runn } else { storeDirectory = store.directory(); } - copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal, onFileSync); + if (indexSettings.isStoreLocalityPartial() == false) { + copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal, onFileSync); + } if (remoteSegmentMetadata != null) { final SegmentInfos infosSnapshot = store.buildSegmentInfos( @@ -5158,7 +5160,7 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runn } } assert Arrays.stream(store.directory().listAll()).filter(f -> f.startsWith(IndexFileNames.SEGMENTS)).findAny().isEmpty() - : "There should not be any segments file in the dir"; + || indexSettings.isStoreLocalityPartial() : "There should not be any segments file in the dir"; store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); } syncSegmentSuccess = true; diff --git a/server/src/main/java/org/opensearch/index/store/CompositeDirectory.java b/server/src/main/java/org/opensearch/index/store/CompositeDirectory.java index 1c9a8b98fb86e..4733fbcdb0494 100644 --- a/server/src/main/java/org/opensearch/index/store/CompositeDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/CompositeDirectory.java @@ -34,8 +34,12 @@ import java.util.Arrays; import java.util.Collection; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.lucene.index.IndexFileNames.SEGMENTS; /** * Composite Directory will contain both local and remote directory @@ -74,12 +78,37 @@ public CompositeDirectory(Directory localDirectory, Directory remoteDirectory, F ); } + /** + * Returns names of all files stored in local directory + * @throws IOException in case of I/O error + */ + private String[] listLocalFiles() throws IOException { + ensureOpen(); + logger.trace("Composite Directory[{}]: listLocalOnly() called", this::toString); + return localDirectory.listAll(); + } + + /** + * Returns a list of names of all block files stored in the local directory for a given file, + * including the original file itself if present. + * + * @param fileName The name of the file to search for, along with its associated block files. + * @return A list of file names, including the original file (if present) and all its block files. + * @throws IOException in case of I/O error while listing files. 
+ */ + private List listBlockFiles(String fileName) throws IOException { + return Stream.of(listLocalFiles()) + .filter(file -> file.equals(fileName) || file.startsWith(fileName + FileTypeUtils.BLOCK_FILE_IDENTIFIER)) + .collect(Collectors.toList()); + } + /** * Returns names of all files stored in this directory in sorted order * Does not include locally stored block files (having _block_ in their names) and files pending deletion * * @throws IOException in case of I/O error */ + // TODO: https://github.com/opensearch-project/OpenSearch/issues/17527 @Override public String[] listAll() throws IOException { ensureOpen(); @@ -105,6 +134,7 @@ public String[] listAll() throws IOException { * Currently deleting only from local directory as files from remote should not be deleted as that is taken care by garbage collection logic of remote directory * @param name the name of an existing file. * @throws IOException in case of I/O error + * @throws NoSuchFileException when file does not exist in the directory */ @Override public void deleteFile(String name) throws IOException { @@ -115,7 +145,21 @@ public void deleteFile(String name) throws IOException { } else if (Arrays.asList(listAll()).contains(name) == false) { throw new NoSuchFileException("File " + name + " not found in directory"); } else { - fileCache.remove(getFilePath(name)); + List blockFiles = listBlockFiles(name); + if (blockFiles.isEmpty()) { + // Remove this condition when this issue is addressed. + // TODO: https://github.com/opensearch-project/OpenSearch/issues/17526 + logger.debug("The file [{}] or its block files do not exist in local directory", name); + } else { + for (String blockFile : blockFiles) { + if (fileCache.get(getFilePath(blockFile)) == null) { + logger.debug("The file [{}] exists in local but not part of FileCache, deleting it from local", blockFile); + localDirectory.deleteFile(blockFile); + } else { + fileCache.remove(getFilePath(blockFile)); + } + } + } } } @@ -254,6 +298,15 @@ public IndexInput openInput(String name, IOContext context) throws IOException { public void close() throws IOException { ensureOpen(); logger.trace("Composite Directory[{}]: close() called", this::toString); + String[] localFiles = listLocalFiles(); + for (String localFile : localFiles) { + // Delete segments_N file with ref count 1 created during index creation on replica shards + // TODO: https://github.com/opensearch-project/OpenSearch/issues/17534 + if (localFile.startsWith(SEGMENTS)) { + fileCache.remove(getFilePath(localFile)); + } + } + fileCache.prune(); localDirectory.close(); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/FileTypeUtils.java b/server/src/main/java/org/opensearch/index/store/remote/utils/FileTypeUtils.java index e78480bd500ee..ca0e6652f5ea4 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/FileTypeUtils.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/FileTypeUtils.java @@ -18,12 +18,14 @@ @ExperimentalApi public class FileTypeUtils { + public static String BLOCK_FILE_IDENTIFIER = "_block_"; + public static boolean isTempFile(String name) { return name.endsWith(".tmp"); } public static boolean isBlockFile(String name) { - return name.contains("_block_"); + return name.contains(BLOCK_FILE_IDENTIFIER); } public static boolean isExtraFSFile(String name) { diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java index 
7f7d42e8fbce8..6e30a39e27bb1 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java @@ -310,13 +310,20 @@ public CacheStats stats() { public void logCurrentState() { lock.lock(); try { - String allFiles = "\n"; + final StringBuilder allFiles = new StringBuilder("\n"); for (Map.Entry> entry : data.entrySet()) { String path = entry.getKey().toString(); String file = path.substring(path.lastIndexOf('/')); - allFiles += file + " [RefCount: " + entry.getValue().refCount + " , Weight: " + entry.getValue().weight + " ]\n"; + allFiles.append(file) + .append(" [RefCount: ") + .append(entry.getValue().refCount) + .append(" , Weight: ") + .append(entry.getValue().weight) + .append(" ]\n"); + } + if (allFiles.length() > 1) { + logger.trace(() -> "Cache entries : " + allFiles); } - logger.trace("Cache entries : " + allFiles); } finally { lock.unlock(); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java index 9ff6ddb1fb667..ecf9bb2ead0d2 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java @@ -190,8 +190,11 @@ public CacheStats stats() { public void logCurrentState() { int i = 0; for (RefCountedCache cache : table) { - logger.trace("SegmentedCache " + i); - ((LRUCache) cache).logCurrentState(); + if (cache.size() > 0) { + final int segmentIndex = i; + logger.trace(() -> "SegmentedCache " + segmentIndex); + ((LRUCache) cache).logCurrentState(); + } i++; } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 64bd73ebb4611..6922ade22b714 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -36,6 +36,7 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Set; @@ -202,6 +203,12 @@ public void startReplication(ActionListener listener, BiConsumer getFiles(CheckpointInfoResponse checkpointInfo) throws IOException { cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FILE_DIFF); + + // Return an empty list for warm indices, In this case, replica shards don't require downloading files from remote storage + // as replicas will sync all files from remote in case of failure. 
+ if (indexShard.indexSettings().isStoreLocalityPartial()) { + return Collections.emptyList(); + } final Store.RecoveryDiff diff = Store.segmentReplicationDiff(checkpointInfo.getMetadataMap(), indexShard.getSegmentMetadataMap()); // local files final Set localFiles = Set.of(indexShard.store().directory().listAll()); diff --git a/server/src/test/java/org/opensearch/index/store/CompositeDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/CompositeDirectoryTests.java index d5628cfab9ee7..43340be680a86 100644 --- a/server/src/test/java/org/opensearch/index/store/CompositeDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/CompositeDirectoryTests.java @@ -39,8 +39,16 @@ public class CompositeDirectoryTests extends BaseRemoteSegmentStoreDirectoryTest private FSDirectory localDirectory; private CompositeDirectory compositeDirectory; - private final static String[] LOCAL_FILES = new String[] { "_1.cfe", "_2.cfe", "_0.cfe_block_7", "_0.cfs_block_7", "temp_file.tmp" }; + private final static String[] LOCAL_FILES = new String[] { + "_1.cfe", + "_1.cfe_block_0", + "_1.cfe_block_1", + "_2.cfe", + "_0.cfe_block_7", + "_0.cfs_block_7", + "temp_file.tmp" }; private final static String FILE_PRESENT_LOCALLY = "_1.cfe"; + private final static String BLOCK_FILE_PRESENT_LOCALLY = "_1.cfe_block_0"; private final static String FILE_PRESENT_IN_REMOTE_ONLY = "_0.si"; private final static String NON_EXISTENT_FILE = "non_existent_file"; private final static String NEW_FILE = "new_file"; @@ -67,9 +75,11 @@ public void testListAll() throws IOException { public void testDeleteFile() throws IOException { assertTrue(existsInCompositeDirectory(FILE_PRESENT_LOCALLY)); + assertTrue(existsInLocalDirectory(BLOCK_FILE_PRESENT_LOCALLY)); // Delete the file and assert that it no more is a part of the directory compositeDirectory.deleteFile(FILE_PRESENT_LOCALLY); assertFalse(existsInCompositeDirectory(FILE_PRESENT_LOCALLY)); + assertFalse(existsInCompositeDirectory(BLOCK_FILE_PRESENT_LOCALLY)); // Reading deleted file from directory should result in NoSuchFileException assertThrows(NoSuchFileException.class, () -> compositeDirectory.openInput(FILE_PRESENT_LOCALLY, IOContext.DEFAULT)); } From 588f46d731587bebd54511dc2df21a2f4ffb9f32 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Fri, 7 Mar 2025 11:28:47 +0530 Subject: [PATCH 057/550] Change IOContext from READONCE to DEFAULT to avoid WrongThreadException (#17502) --------- Signed-off-by: Sachin Kale --- .../index/shard/RemoteStoreRefreshListener.java | 2 +- .../opensearch/index/store/RemoteDirectory.java | 1 + .../index/store/RemoteDirectoryTests.java | 4 ++-- .../test/store/MockFSDirectoryFactory.java | 16 ++++++++++++++++ 4 files changed, 20 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 702928771f131..8ace4848806d7 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -459,7 +459,7 @@ private void uploadNewSegments( batchUploadListener.onFailure(ex); }); statsListener.beforeUpload(src); - remoteDirectory.copyFrom(storeDirectory, src, IOContext.READONCE, aggregatedListener, isLowPriorityUpload()); + remoteDirectory.copyFrom(storeDirectory, src, IOContext.DEFAULT, aggregatedListener, isLowPriorityUpload()); } } diff --git 
a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index 949113cce87d6..38d1b4d1252cc 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -383,6 +383,7 @@ private void uploadBlob( ActionListener listener, boolean lowPriorityUpload ) throws Exception { + assert ioContext != IOContext.READONCE : "Remote upload will fail with IoContext.READONCE"; long expectedChecksum = calculateChecksumOfChecksum(from, src); long contentLength; try (IndexInput indexInput = from.openInput(src, ioContext)) { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java index cc8c6085978a8..ea6e6e538caa5 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -92,7 +92,7 @@ public void testCopyFrom() throws IOException, InterruptedException { storeDirectory, filename, filename, - IOContext.READONCE, + IOContext.DEFAULT, () -> postUploadInvoked.set(true), new ActionListener<>() { @Override @@ -130,7 +130,7 @@ public void testCopyFromWithException() throws IOException, InterruptedException storeDirectory, filename, filename, - IOContext.READONCE, + IOContext.DEFAULT, () -> postUploadInvoked.set(true), new ActionListener<>() { @Override diff --git a/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java b/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java index 9c42ea2672601..d8279170ddd92 100644 --- a/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java +++ b/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java @@ -36,7 +36,10 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CheckIndex; +import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.tests.store.BaseDirectoryWrapper; import org.apache.lucene.tests.store.MockDirectoryWrapper; @@ -203,6 +206,19 @@ public synchronized void crash() throws IOException { public Set getPendingDeletions() throws IOException { return in.getPendingDeletions(); } + + // In remote store feature, the upload flow is async and IndexInput can be opened and closed + // by different threads, so we always use IOContext.DEFAULT. + // But MockDirectoryWrapper throws an exception if segments_N fil is opened with any IOContext other than READONCE. + // Following change is temporary override to avoid the test failures. We should fix the multiple thread access + // in remote store upload flow. 
+ @Override + public synchronized IndexInput openInput(String name, IOContext context) throws IOException { + if (name.startsWith(IndexFileNames.SEGMENTS)) { + context = IOContext.READONCE; + } + return super.openInput(name, context); + } } static final class CloseableDirectory implements Closeable { From c48efd0fd59e598c983b59d952f41f7b69341812 Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Fri, 7 Mar 2025 12:50:03 +0530 Subject: [PATCH 058/550] Fix flaky tests in RemoteStoreStatsIT (#17535) Signed-off-by: Ashish Singh --- .../remotestore/RemoteStoreStatsIT.java | 241 +++++++++--------- .../remote/RemoteSegmentTransferTracker.java | 48 ++++ 2 files changed, 166 insertions(+), 123 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 86d586cd17146..b8986f5a3bf9a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -21,6 +21,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.SetOnce; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.index.remote.RemoteSegmentTransferTracker; @@ -261,92 +262,59 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce // - Assert that download stats == upload stats // - Repeat this step for random times (between 5 and 10) - // Create index with 1 pri and 1 replica and refresh interval disabled - createIndex( - INDEX_NAME, - Settings.builder().put(remoteStoreIndexSettings(1, 1)).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1).build() - ); - ensureGreen(INDEX_NAME); - - // Manually invoke a refresh - refresh(INDEX_NAME); - - // Get zero state values - // Extract and assert zero state primary stats - RemoteStoreStatsResponse zeroStateResponse = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); - RemoteSegmentTransferTracker.Stats zeroStatePrimaryStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) - .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) - .collect(Collectors.toList()) - .get(0) - .getSegmentStats(); - logger.info( - "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started, {}b upload bytes failed , {} uploads succeeded, {} upload byes succeeded.", - zeroStatePrimaryStats.refreshTimeLagMs, - zeroStatePrimaryStats.bytesLag, - zeroStatePrimaryStats.uploadBytesStarted, - zeroStatePrimaryStats.uploadBytesFailed, - zeroStatePrimaryStats.totalUploadsSucceeded, - zeroStatePrimaryStats.uploadBytesSucceeded - ); - assertTrue( - zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded - && zeroStatePrimaryStats.totalUploadsSucceeded == 1 - ); - assertTrue( - zeroStatePrimaryStats.uploadBytesStarted == zeroStatePrimaryStats.uploadBytesSucceeded - && zeroStatePrimaryStats.uploadBytesSucceeded > 0 - ); - assertTrue(zeroStatePrimaryStats.totalUploadsFailed == 0 && zeroStatePrimaryStats.uploadBytesFailed == 0); + // Prepare settings with single replica + Settings.Builder settings = Settings.builder() + .put(remoteStoreIndexSettings(1, 1)) + 
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1); - // Extract and assert zero state replica stats - RemoteSegmentTransferTracker.Stats zeroStateReplicaStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) - .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary()) - .collect(Collectors.toList()) - .get(0) - .getSegmentStats(); - assertTrue( - zeroStateReplicaStats.directoryFileTransferTrackerStats.transferredBytesStarted == 0 - && zeroStateReplicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded == 0 - ); + // Retrieve zero state stats + SetOnce zeroStatePrimaryStats = prepareZeroStateStats(settings, false); - // Index documents + // Iteration logic for (int i = 1; i <= randomIntBetween(5, 10); i++) { indexSingleDoc(INDEX_NAME); - // Running Flush & Refresh manually flushAndRefresh(INDEX_NAME); ensureGreen(INDEX_NAME); + waitForReplication(); - // Poll for RemoteStore Stats assertBusy(() -> { RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); - // Iterate through the response and extract the relevant segment upload and download stats + + // Existing validation logic List primaryStatsList = Arrays.stream(response.getRemoteStoreStats()) .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) - .collect(Collectors.toList()); + .toList(); assertEquals(1, primaryStatsList.size()); + List replicaStatsList = Arrays.stream(response.getRemoteStoreStats()) .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary()) - .collect(Collectors.toList()); + .toList(); assertEquals(1, replicaStatsList.size()); - RemoteSegmentTransferTracker.Stats primaryStats = primaryStatsList.get(0).getSegmentStats(); - RemoteSegmentTransferTracker.Stats replicaStats = replicaStatsList.get(0).getSegmentStats(); - // Assert Upload syncs - zero state uploads == download syncs + + RemoteSegmentTransferTracker.Stats primaryStats = primaryStatsList.getFirst().getSegmentStats(); + RemoteSegmentTransferTracker.Stats replicaStats = replicaStatsList.getFirst().getSegmentStats(); + + // Existing assertions assertTrue(primaryStats.totalUploadsStarted > 0); assertTrue(primaryStats.totalUploadsSucceeded > 0); + assertTrue(replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted > 0); + assertTrue( - replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted > 0 - && primaryStats.uploadBytesStarted - - zeroStatePrimaryStats.uploadBytesStarted >= replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted + primaryStats.uploadBytesStarted - zeroStatePrimaryStats + .get().uploadBytesStarted >= replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted ); + + assertTrue(replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0); + assertTrue( - replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0 - && primaryStats.uploadBytesSucceeded - - zeroStatePrimaryStats.uploadBytesSucceeded >= replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded + primaryStats.uploadBytesSucceeded - zeroStatePrimaryStats + .get().uploadBytesSucceeded >= replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded ); + // Assert zero failures assertEquals(0, primaryStats.uploadBytesFailed); assertEquals(0, replicaStats.directoryFileTransferTrackerStats.transferredBytesFailed); - }, 60, TimeUnit.SECONDS); + }); } } @@ -361,76 +329,42 @@ public void 
testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr // - Assert that download stats == upload stats // - Repeat this step for random times (between 5 and 10) - // Create index + // Get number of data nodes int dataNodeCount = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); - createIndex( - INDEX_NAME, - Settings.builder() - .put(remoteStoreIndexSettings(dataNodeCount - 1, 1)) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1) - .build() - ); - ensureGreen(INDEX_NAME); - // Manually invoke a refresh - refresh(INDEX_NAME); - - // Get zero state values - // Extract and assert zero state primary stats - RemoteStoreStatsResponse zeroStateResponse = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); - RemoteSegmentTransferTracker.Stats zeroStatePrimaryStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) - .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) - .collect(Collectors.toList()) - .get(0) - .getSegmentStats(); - logger.info( - "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started, {}b upload bytes failed , {} uploads succeeded, {} upload byes succeeded.", - zeroStatePrimaryStats.refreshTimeLagMs, - zeroStatePrimaryStats.bytesLag, - zeroStatePrimaryStats.uploadBytesStarted, - zeroStatePrimaryStats.uploadBytesFailed, - zeroStatePrimaryStats.totalUploadsSucceeded, - zeroStatePrimaryStats.uploadBytesSucceeded - ); - assertTrue( - zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded - && zeroStatePrimaryStats.totalUploadsSucceeded == 1 - ); - assertTrue( - zeroStatePrimaryStats.uploadBytesStarted == zeroStatePrimaryStats.uploadBytesSucceeded - && zeroStatePrimaryStats.uploadBytesSucceeded > 0 - ); - assertTrue(zeroStatePrimaryStats.totalUploadsFailed == 0 && zeroStatePrimaryStats.uploadBytesFailed == 0); + // Prepare settings with multiple replicas + Settings.Builder settings = Settings.builder() + .put(remoteStoreIndexSettings(dataNodeCount - 1, 1)) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1); - // Extract and assert zero state replica stats - List zeroStateReplicaStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) - .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary()) - .collect(Collectors.toList()); - zeroStateReplicaStats.forEach(stats -> { - assertTrue( - stats.getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted == 0 - && stats.getSegmentStats().directoryFileTransferTrackerStats.transferredBytesSucceeded == 0 - ); - }); + // Retrieve zero state stats + SetOnce zeroStatePrimaryStats = prepareZeroStateStats(settings, true); + // Get current nodes in cluster int currentNodesInCluster = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); + + // Iteration logic for (int i = 0; i < randomIntBetween(5, 10); i++) { indexSingleDoc(INDEX_NAME); - // Running Flush & Refresh manually flushAndRefresh(INDEX_NAME); + ensureGreen(INDEX_NAME); + waitForReplication(); assertBusy(() -> { RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + + // Validate total and successful shards assertEquals(currentNodesInCluster, response.getSuccessfulShards()); - long uploadsStarted = 0, uploadsSucceeded = 0, uploadsFailed = 0; - long uploadBytesStarted = 0, uploadBytesSucceeded = 0, uploadBytesFailed = 0; - List downloadBytesStarted = new ArrayList<>(), downloadBytesSucceeded = 
new ArrayList<>(), downloadBytesFailed = - new ArrayList<>(); - // Assert that stats for primary shard and replica shard set are equal - for (RemoteStoreStats eachStatsObject : response.getRemoteStoreStats()) { - RemoteSegmentTransferTracker.Stats stats = eachStatsObject.getSegmentStats(); - if (eachStatsObject.getShardRouting().primary()) { + long uploadBytesStarted = 0, uploadBytesSucceeded = 0, uploadBytesFailed = 0; + List downloadBytesStarted = new ArrayList<>(); + List downloadBytesSucceeded = new ArrayList<>(); + List downloadBytesFailed = new ArrayList<>(); + + // Collect stats for primary and replica shards + for (RemoteStoreStats statsObject : response.getRemoteStoreStats()) { + RemoteSegmentTransferTracker.Stats stats = statsObject.getSegmentStats(); + if (statsObject.getShardRouting().primary()) { uploadBytesStarted = stats.uploadBytesStarted; uploadBytesSucceeded = stats.uploadBytesSucceeded; uploadBytesFailed = stats.uploadBytesFailed; @@ -441,17 +375,78 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr } } - assertEquals(0, uploadsFailed); + // Assertions assertEquals(0, uploadBytesFailed); for (int j = 0; j < response.getSuccessfulShards() - 1; j++) { - assertTrue(uploadBytesStarted - zeroStatePrimaryStats.uploadBytesStarted > downloadBytesStarted.get(j)); - assertTrue(uploadBytesSucceeded - zeroStatePrimaryStats.uploadBytesSucceeded > downloadBytesSucceeded.get(j)); + assertTrue(uploadBytesStarted - zeroStatePrimaryStats.get().uploadBytesStarted > downloadBytesStarted.get(j)); + assertTrue(uploadBytesSucceeded - zeroStatePrimaryStats.get().uploadBytesSucceeded > downloadBytesSucceeded.get(j)); assertEquals(0, (long) downloadBytesFailed.get(j)); } - }, 60, TimeUnit.SECONDS); + }); + } + } + + // New helper method to validate zero state primary stats + private void validateZeroStatePrimaryStats(RemoteSegmentTransferTracker.Stats primaryStats) { + logger.info("Zero state primary stats: {}", primaryStats); + assertEquals(primaryStats.totalUploadsStarted, primaryStats.totalUploadsSucceeded); + assertTrue(primaryStats.totalUploadsSucceeded >= 1); + assertEquals(primaryStats.uploadBytesStarted, primaryStats.uploadBytesSucceeded); + assertTrue(primaryStats.uploadBytesSucceeded > 0); + assertEquals(0, primaryStats.totalUploadsFailed); + assertEquals(0, primaryStats.uploadBytesFailed); + } + + // helper method to validate zero state replica stats + private void validateZeroStateReplicaStats(RemoteStoreStatsResponse zeroStateResponse, boolean multipleShardsExpected) { + List zeroStateReplicaStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) + .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary()) + .toList(); + + if (multipleShardsExpected) { + zeroStateReplicaStats.forEach(stats -> { + assertEquals(0, stats.getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted); + assertEquals(0, stats.getSegmentStats().directoryFileTransferTrackerStats.transferredBytesSucceeded); + }); + } else { + RemoteSegmentTransferTracker.Stats replicaStats = zeroStateReplicaStats.getFirst().getSegmentStats(); + assertEquals(0, replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted); + assertEquals(0, replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded); } } + // New helper method for common test setup and zero state stats retrieval + private SetOnce prepareZeroStateStats( + Settings.Builder additionalSettings, + boolean multipleShardsExpected + ) throws Exception { + SetOnce 
zeroStatePrimaryStats = new SetOnce<>(); + + // Create index with specified settings + createIndex(INDEX_NAME, additionalSettings.build()); + ensureGreen(INDEX_NAME); + + // Manually invoke a refresh + refresh(INDEX_NAME); + + assertBusy(() -> { + RemoteStoreStatsResponse zeroStateResponse = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + + RemoteSegmentTransferTracker.Stats primaryStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) + .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) + .toList() + .getFirst() + .getSegmentStats(); + + validateZeroStatePrimaryStats(primaryStats); + validateZeroStateReplicaStats(zeroStateResponse, multipleShardsExpected); + + zeroStatePrimaryStats.set(primaryStats); + }); + + return zeroStatePrimaryStats; + } + public void testStatsOnShardRelocation() { setup(); // Scenario: diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index a29bd1d840b43..4114a14b455e7 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -580,5 +580,53 @@ public int hashCode() { directoryFileTransferTrackerStats ); } + + @Override + public String toString() { + return "Stats{" + + "shardId=" + + shardId + + ", localRefreshClockTimeMs=" + + localRefreshClockTimeMs + + ", remoteRefreshClockTimeMs=" + + remoteRefreshClockTimeMs + + ", refreshTimeLagMs=" + + refreshTimeLagMs + + ", localRefreshNumber=" + + localRefreshNumber + + ", remoteRefreshNumber=" + + remoteRefreshNumber + + ", uploadBytesStarted=" + + uploadBytesStarted + + ", uploadBytesFailed=" + + uploadBytesFailed + + ", uploadBytesSucceeded=" + + uploadBytesSucceeded + + ", totalUploadsStarted=" + + totalUploadsStarted + + ", totalUploadsFailed=" + + totalUploadsFailed + + ", totalUploadsSucceeded=" + + totalUploadsSucceeded + + ", rejectionCount=" + + rejectionCount + + ", consecutiveFailuresCount=" + + consecutiveFailuresCount + + ", lastSuccessfulRemoteRefreshBytes=" + + lastSuccessfulRemoteRefreshBytes + + ", uploadBytesMovingAverage=" + + uploadBytesMovingAverage + + ", uploadBytesPerSecMovingAverage=" + + uploadBytesPerSecMovingAverage + + ", totalUploadTimeInMs=" + + totalUploadTimeInMs + + ", uploadTimeMovingAverage=" + + uploadTimeMovingAverage + + ", bytesLag=" + + bytesLag + + ", directoryFileTransferTrackerStats=" + + directoryFileTransferTrackerStats + + '}'; + } } } From 6b4597275fee56b28891272c2c8a216768687ed0 Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Fri, 7 Mar 2025 15:12:39 +0530 Subject: [PATCH 059/550] Fix compilation issue for PR #17535 during backport (#17546) Signed-off-by: Ashish Singh --- .../opensearch/remotestore/RemoteStoreStatsIT.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index b8986f5a3bf9a..4053ce5f6c678 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -283,16 +283,16 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce // Existing validation logic List primaryStatsList = 
Arrays.stream(response.getRemoteStoreStats()) .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) - .toList(); + .collect(Collectors.toList()); assertEquals(1, primaryStatsList.size()); List replicaStatsList = Arrays.stream(response.getRemoteStoreStats()) .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary()) - .toList(); + .collect(Collectors.toList()); assertEquals(1, replicaStatsList.size()); - RemoteSegmentTransferTracker.Stats primaryStats = primaryStatsList.getFirst().getSegmentStats(); - RemoteSegmentTransferTracker.Stats replicaStats = replicaStatsList.getFirst().getSegmentStats(); + RemoteSegmentTransferTracker.Stats primaryStats = primaryStatsList.get(0).getSegmentStats(); + RemoteSegmentTransferTracker.Stats replicaStats = replicaStatsList.get(0).getSegmentStats(); // Existing assertions assertTrue(primaryStats.totalUploadsStarted > 0); @@ -409,7 +409,7 @@ private void validateZeroStateReplicaStats(RemoteStoreStatsResponse zeroStateRes assertEquals(0, stats.getSegmentStats().directoryFileTransferTrackerStats.transferredBytesSucceeded); }); } else { - RemoteSegmentTransferTracker.Stats replicaStats = zeroStateReplicaStats.getFirst().getSegmentStats(); + RemoteSegmentTransferTracker.Stats replicaStats = zeroStateReplicaStats.get(0).getSegmentStats(); assertEquals(0, replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted); assertEquals(0, replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded); } @@ -434,8 +434,8 @@ private SetOnce prepareZeroStateStats( RemoteSegmentTransferTracker.Stats primaryStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) - .toList() - .getFirst() + .collect(Collectors.toList()) + .get(0) .getSegmentStats(); validateZeroStatePrimaryStats(primaryStats); From ffa46cadcc1f8bef7b247d731024de9a279701e0 Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Fri, 7 Mar 2025 19:39:12 +0530 Subject: [PATCH 060/550] Add test for stale writer correctness with pinned timestamp snapshots (#17547) Signed-off-by: Ashish Singh --- .../index/remote/RemoteStoreUtilsTests.java | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index d134a2bad7b74..1a9b9092ee12e 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -993,6 +993,37 @@ public void testGetPinnedTimestampLockedFilesWithPinnedTimestampsDifferentPrefix assertEquals(0, metadataFilePinnedTimestampCache.size()); } + /** + * This test checks the case when a stale writer is uploading metadata files with higher timestamp, but lower primary + * term. 
+ */ + public void testGetPinnedTimestampLockedFilesForDivergentWrites() { + setupRemotePinnedTimestampFeature(true); + + Map metadataFilePinnedTimestampCache = new HashMap<>(); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 7002 + // 3 - 6999 + // 4 - 6998 + // 5 - 6995 + // 5 - 6990 + Tuple, Set> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(7002L, 6L, 6999L, 3L, 6998L, 4L, 6995L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + Map metadataFiles = metadataAndLocks.v1(); + Set implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6995L))); + // Now we cache all the matches except the last one. + assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(6995L), metadataFilePinnedTimestampCache.get(7000L)); + } + public void testFilterOutMetadataFilesBasedOnAgeFeatureDisabled() { setupRemotePinnedTimestampFeature(false); List metadataFiles = new ArrayList<>(); From 105aeb5eaed26332cc11c4655c552a3ffe769e01 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Fri, 7 Mar 2025 15:59:38 -0800 Subject: [PATCH 061/550] [Tiered Caching] Remove PLUGGABLE_CACHE feature flag (#17344) * Remove PLUGGABLE_CACHE feature flag Signed-off-by: Peter Alfonsi * changelog Signed-off-by: Peter Alfonsi * move changelog entry Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * fix IT init failure Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi --------- Signed-off-by: Peter Alfonsi Signed-off-by: Peter Alfonsi Co-authored-by: Peter Alfonsi --- CHANGELOG-3.0.md | 1 + distribution/src/config/opensearch.yml | 4 -- .../tier/TieredSpilloverCacheBaseIT.java | 2 - .../tier/TieredSpilloverCachePlugin.java | 5 +- .../tier/TieredSpilloverCachePluginTests.java | 7 +- .../tier/TieredSpilloverCacheTests.java | 13 ---- .../opensearch/cache/EhcacheDiskCacheIT.java | 6 -- .../store/disk/EhCacheDiskCacheTests.java | 2 - .../CacheStatsAPIIndicesRequestCacheIT.java | 17 +---- .../indices/IndicesRequestCacheIT.java | 5 +- .../common/cache/service/CacheService.java | 14 ++-- .../cache/store/OpenSearchOnHeapCache.java | 12 +--- .../common/settings/ClusterSettings.java | 21 +++--- .../common/settings/FeatureFlagSettings.java | 1 - .../opensearch/common/util/FeatureFlags.java | 9 --- .../indices/IndicesRequestCache.java | 6 +- .../cache/service/CacheServiceTests.java | 35 ++-------- .../store/OpenSearchOnHeapCacheTests.java | 54 +++++++-------- .../indices/IndicesRequestCacheTests.java | 67 ++----------------- 19 files changed, 61 insertions(+), 220 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 99b636822fb72..fabf6645fb6eb 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -73,6 +73,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove package org.opensearch.action.support.master ([#4856](https://github.com/opensearch-project/OpenSearch/issues/4856)) - Remove transport-nio plugin ([#16887](https://github.com/opensearch-project/OpenSearch/issues/16887)) - Remove deprecated 'gateway' settings used to defer cluster recovery 
([#3117](https://github.com/opensearch-project/OpenSearch/issues/3117)) +- Remove FeatureFlags.PLUGGABLE_CACHE as the feature is no longer experimental ([#17344](https://github.com/opensearch-project/OpenSearch/pull/17344)) ### Fixed - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index 1ac7c5299964e..5b3babd3acaef 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -122,10 +122,6 @@ ${path.logs} # #opensearch.experimental.optimization.datetime_formatter_caching.enabled: false # -# Gates the functionality of enabling Opensearch to use pluggable caches with respective store names via setting. -# -#opensearch.experimental.feature.pluggable.caching.enabled: false -# # Gates the functionality of star tree index, which improves the performance of search aggregations. # #opensearch.experimental.feature.composite_index.star_tree.enabled: true diff --git a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheBaseIT.java b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheBaseIT.java index 01371ca8eeefb..75895b1fc4c11 100644 --- a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheBaseIT.java +++ b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheBaseIT.java @@ -12,14 +12,12 @@ import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.cache.store.OpenSearchOnHeapCache; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; public class TieredSpilloverCacheBaseIT extends OpenSearchIntegTestCase { public Settings defaultSettings(String onHeapCacheSizeInBytesOrPercentage, int numberOfSegments) { return Settings.builder() - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .put( CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java index d1d033fae8cd2..3c4079fefd612 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java @@ -12,7 +12,6 @@ import org.opensearch.common.cache.ICache; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.CachePlugin; import org.opensearch.plugins.Plugin; @@ -64,9 +63,7 @@ public List> getSettings() { ); settingList.add(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType)); settingList.add(TOOK_TIME_DISK_TIER_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType)); - if (FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings)) { - settingList.add(DISK_CACHE_ENABLED_SETTING_MAP.get(cacheType)); - } + settingList.add(DISK_CACHE_ENABLED_SETTING_MAP.get(cacheType)); settingList.add( 
TieredSpilloverCacheSettings.TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) ); diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java index 4a96ffe2069ec..54aba3504f42f 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java @@ -10,7 +10,6 @@ import org.opensearch.common.cache.ICache; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchTestCase; import java.util.Map; @@ -24,10 +23,8 @@ public void testGetCacheFactoryMap() { assertEquals(TieredSpilloverCachePlugin.TIERED_CACHE_SPILLOVER_PLUGIN_NAME, tieredSpilloverCachePlugin.getName()); } - public void testGetSettingsWithFeatureFlagOn() { - TieredSpilloverCachePlugin tieredSpilloverCachePlugin = new TieredSpilloverCachePlugin( - Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE_SETTING.getKey(), true).build() - ); + public void testGetSettings() { + TieredSpilloverCachePlugin tieredSpilloverCachePlugin = new TieredSpilloverCachePlugin(Settings.builder().build()); assertFalse(tieredSpilloverCachePlugin.getSettings().isEmpty()); } } diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index c74eb371709f6..2dc115b73c378 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -30,7 +30,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.env.NodeEnvironment; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -183,7 +182,6 @@ public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME ) .put(TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()).getKey(), 1) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .build(); String storagePath = getStoragePath(settings); ICache tieredSpilloverICache = new TieredSpilloverCache.TieredSpilloverCacheFactory().create( @@ -283,7 +281,6 @@ public void testComputeIfAbsentWithSegmentedCache() throws Exception { CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME ) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .build(); String storagePath = getStoragePath(settings); ICache tieredSpilloverICache = new TieredSpilloverCache.TieredSpilloverCacheFactory().create( @@ -406,7 +403,6 @@ public void testWithFactoryCreationWithOnHeapCacheNotPresent() { CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME ) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .build(); 
IllegalArgumentException ex = assertThrows( @@ -491,7 +487,6 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME ) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .put( TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace( CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() @@ -1276,7 +1271,6 @@ public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exceptio CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME ) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .put( TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace( CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() @@ -2160,7 +2154,6 @@ public void testWithInvalidSegmentNumber() throws Exception { TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME ) .put(TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()).getKey(), 1) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .put(TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()).getKey(), 3) .build(); String storagePath = getStoragePath(settings); @@ -2226,7 +2219,6 @@ public void testWithVeryLowDiskCacheSize() throws Exception { ).getKey(), 1L ) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .put(TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()).getKey(), 2) .build(); String storagePath = getStoragePath(settings); @@ -2285,7 +2277,6 @@ public void testTieredCacheDefaultSegmentCount() { CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME ) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .build(); String storagePath = getStoragePath(settings); @@ -2419,7 +2410,6 @@ public void testSegmentSizesWhenUsingFactory() { ).getKey(), heapSizeFromImplSetting + "b" ) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .put( TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()).getKey(), numSegments @@ -2466,7 +2456,6 @@ public void testSegmentSizesWhenNotUsingFactory() { CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME ) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") // The size setting from the OpenSearchOnHeapCache implementation should not be honored .put( OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES.getConcreteSettingForNamespace( @@ -2697,7 +2686,6 @@ private TieredSpilloverCache initializeTieredSpilloverCache( CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME ) - .put(FeatureFlags.PLUGGABLE_CACHE, "true") .put(settings) .build() ) @@ -2750,7 +2738,6 @@ private CacheConfig getCacheConfig( CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME ) - 
.put(FeatureFlags.PLUGGABLE_CACHE, "true") .put(settings) .build() ) diff --git a/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java b/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java index d61cdaa89e093..1968c57ddb940 100644 --- a/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java +++ b/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java @@ -26,7 +26,6 @@ import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.query.QueryBuilders; @@ -71,11 +70,6 @@ protected Collection> nodePlugins() { return Arrays.asList(EhcacheCachePlugin.class); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.PLUGGABLE_CACHE, "true").build(); - } - private Settings defaultSettings(long sizeInBytes, TimeValue expirationTime) { if (expirationTime == null) { expirationTime = TimeValue.MAX_VALUE; diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java index 4e879af052c15..2f58bb5df0ebe 100644 --- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -26,7 +26,6 @@ import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -1221,7 +1220,6 @@ private EhcacheDiskCache setupMaxSizeTest(long maxSizeFromSettin MockRemovalListener listener = new MockRemovalListener<>(); try (NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) { Settings settings = Settings.builder() - .put(FeatureFlags.PLUGGABLE_CACHE, true) .put( CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index c7f85c7233869..3f947b36e28de 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -8,8 +8,6 @@ package org.opensearch.indices; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; @@ -24,7 +22,6 @@ import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import 
org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -34,13 +31,10 @@ import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import org.opensearch.transport.client.Client; import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -50,16 +44,7 @@ // Use a single data node to simplify logic about cache stats across different shards. @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 1) -public class CacheStatsAPIIndicesRequestCacheIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public CacheStatsAPIIndicesRequestCacheIT(Settings settings) { - super(settings); - } - - @ParametersFactory - public static Collection parameters() { - return Arrays.asList(new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build() }); - } - +public class CacheStatsAPIIndicesRequestCacheIT extends OpenSearchIntegTestCase { /** * Test aggregating by indices, indices+shards, shards, or no levels, and check the resulting stats * are as we expect. diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index cbc453eb5c0b0..dc72291e95184 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -56,7 +56,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; @@ -110,9 +109,7 @@ public IndicesRequestCacheIT(Settings settings) { public static Collection parameters() { return Arrays.asList( new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, - new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build() }, - new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "false").build() } + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } ); } diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java index da006264094d2..8fc0fc7d07cac 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java +++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java @@ -18,7 +18,6 @@ import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; 
-import org.opensearch.common.util.FeatureFlags; import java.util.HashMap; import java.util.Map; @@ -47,10 +46,9 @@ public CacheService(Map cacheStoreTypeFactories, Setting public ICache createCache(CacheConfig config, CacheType cacheType) { String storeName = getStoreNameFromSetting(cacheType, settings); - if (!pluggableCachingEnabled(cacheType, settings)) { - // Condition 1: In case feature flag is off, we default to onHeap. - // Condition 2: In case storeName is not explicitly mentioned, we assume user is looking to use older - // settings, so we again fallback to onHeap to maintain backward compatibility. + if (!storeNamePresent(cacheType, settings)) { + // In case storeName is not explicitly mentioned, we assume user is looking to use older + // settings, so we fallback to onHeap to maintain backward compatibility. // It is guaranteed that we will have this store name registered, so // should be safe. storeName = OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME; @@ -73,11 +71,11 @@ public NodeCacheStats stats(CommonStatsFlags flags) { } /** - * Check if pluggable caching is on, and if a store type is present for this cache type. + * Check if a store type is present for this cache type. */ - public static boolean pluggableCachingEnabled(CacheType cacheType, Settings settings) { + public static boolean storeNamePresent(CacheType cacheType, Settings settings) { String storeName = getStoreNameFromSetting(cacheType, settings); - return FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) && storeName != null && !storeName.isBlank(); + return storeName != null && !storeName.isBlank(); } private static String getStoreNameFromSetting(CacheType cacheType, Settings settings) { diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index e1039c5d9ee55..f3a496f07b3e8 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -29,7 +29,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.unit.ByteSizeValue; import java.util.List; @@ -182,7 +181,7 @@ public static class OpenSearchOnHeapCacheFactory implements Factory { public ICache create(CacheConfig config, CacheType cacheType, Map cacheFactories) { Map> settingList = OpenSearchOnHeapCacheSettings.getSettingListForCacheType(cacheType); Settings settings = config.getSettings(); - boolean statsTrackingEnabled = statsTrackingEnabled(config.getSettings(), config.getStatsTrackingEnabled()); + boolean statsTrackingEnabled = config.getStatsTrackingEnabled(); ICacheBuilder builder = new Builder().setDimensionNames(config.getDimensionNames()) .setStatsTrackingEnabled(statsTrackingEnabled) .setExpireAfterAccess(((TimeValue) settingList.get(EXPIRE_AFTER_ACCESS_KEY).get(settings))) @@ -197,7 +196,7 @@ public ICache create(CacheConfig config, CacheType cacheType, /* Use the cache config value if present. This can be passed down from the TieredSpilloverCache when creating individual segments, - but is not passed in from the IRC if pluggable caching is on. + but is not passed in from the IRC if a store name setting is present. 
*/ builder.setMaximumWeightInBytes(config.getMaxSizeInBytes()); } else { @@ -209,7 +208,7 @@ public ICache create(CacheConfig config, CacheType cacheType, builder.setNumberOfSegments(-1); // By default it will use 256 segments. } - if (!CacheService.pluggableCachingEnabled(cacheType, settings)) { + if (!CacheService.storeNamePresent(cacheType, settings)) { // For backward compatibility as the user intent is to use older settings. builder.setMaximumWeightInBytes(config.getMaxSizeInBytes()); builder.setExpireAfterAccess(config.getExpireAfterAccess()); @@ -223,11 +222,6 @@ public ICache create(CacheConfig config, CacheType cacheType, public String getCacheName() { return NAME; } - - private boolean statsTrackingEnabled(Settings settings, boolean statsTrackingEnabledConfig) { - // Don't track stats when pluggable caching is off, or when explicitly set to false in the CacheConfig - return FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) && statsTrackingEnabledConfig; - } } /** diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index b4b85e0a9d367..c1ce90aaa8efa 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -810,7 +810,16 @@ public void apply(Settings value, Settings current, Settings previous) { ResponseLimitSettings.CAT_SEGMENTS_RESPONSE_LIMIT_SETTING, // Thread pool Settings - ThreadPool.CLUSTER_THREAD_POOL_SIZE_SETTING + ThreadPool.CLUSTER_THREAD_POOL_SIZE_SETTING, + + // Tiered caching settings + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE), + OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ), + OpenSearchOnHeapCacheSettings.EXPIRE_AFTER_ACCESS_SETTING.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ) ) ) ); @@ -831,16 +840,6 @@ public void apply(Settings value, Settings current, Settings previous) { TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING, TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING ), - List.of(FeatureFlags.PLUGGABLE_CACHE), - List.of( - CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE), - OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES.getConcreteSettingForNamespace( - CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() - ), - OpenSearchOnHeapCacheSettings.EXPIRE_AFTER_ACCESS_SETTING.getConcreteSettingForNamespace( - CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() - ) - ), List.of(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL), List.of(SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING) ); diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 6753bb8eac083..b1d13b1ae8693 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -35,7 +35,6 @@ protected FeatureFlagSettings( FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING, FeatureFlags.TIERED_REMOTE_INDEX_SETTING, FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, - FeatureFlags.PLUGGABLE_CACHE_SETTING, FeatureFlags.STAR_TREE_INDEX_SETTING, FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, 
FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 4be45aed70023..f0b26d562c52b 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -56,12 +56,6 @@ public class FeatureFlags { */ public static final String TIERED_REMOTE_INDEX = "opensearch.experimental.feature.tiered_remote_index.enabled"; - /** - * Gates the functionality of pluggable cache. - * Enables OpenSearch to use pluggable caches with respective store names via setting. - */ - public static final String PLUGGABLE_CACHE = "opensearch.experimental.feature.pluggable.caching.enabled"; - /** * Gates the functionality of background task execution. */ @@ -87,8 +81,6 @@ public class FeatureFlags { public static final Setting TIERED_REMOTE_INDEX_SETTING = Setting.boolSetting(TIERED_REMOTE_INDEX, false, Property.NodeScope); - public static final Setting PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); - public static final Setting READER_WRITER_SPLIT_EXPERIMENTAL_SETTING = Setting.boolSetting( READER_WRITER_SPLIT_EXPERIMENTAL, false, @@ -137,7 +129,6 @@ public class FeatureFlags { TELEMETRY_SETTING, DATETIME_FORMATTER_CACHING_SETTING, TIERED_REMOTE_INDEX_SETTING, - PLUGGABLE_CACHE_SETTING, STAR_TREE_INDEX_SETTING, APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 4f42cd8fe8672..8f1dcbc9b8ee8 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -233,9 +233,9 @@ CacheConfig getCacheConfig(Settings settings, NodeEnvironme .setClusterSettings(clusterService.getClusterSettings()) .setStoragePath(nodeEnvironment.nodePaths()[0].path.toString() + "/request_cache"); - if (!CacheService.pluggableCachingEnabled(CacheType.INDICES_REQUEST_CACHE, settings)) { - // If pluggable caching is not enabled, use the max size based on the IRC setting into the config. - // If pluggable caching is enabled, cache implementations instead determine their own sizes based on their own implementation + if (!CacheService.storeNamePresent(CacheType.INDICES_REQUEST_CACHE, settings)) { + // If a store name is absent, use the max size based on the IRC setting into the config. + // If a store name is present, cache implementations instead determine their own sizes based on their own implementation // size settings. 
configBuilder.setMaxSizeInBytes(sizeInBytes); } diff --git a/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java b/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java index b355161f6f310..6abc062d258ea 100644 --- a/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java +++ b/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java @@ -17,7 +17,6 @@ import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.CachePlugin; import org.opensearch.test.OpenSearchTestCase; @@ -30,7 +29,6 @@ import static org.mockito.Mockito.when; public class CacheServiceTests extends OpenSearchTestCase { - public void testWithCreateCacheForIndicesRequestCacheType() { CachePlugin mockPlugin1 = mock(CachePlugin.class); ICache.Factory factory1 = mock(ICache.Factory.class); @@ -50,38 +48,15 @@ public void testWithCreateCacheForIndicesRequestCacheType() { ); CacheConfig config = mock(CacheConfig.class); ICache mockOnHeapCache = mock(OpenSearchOnHeapCache.class); - when(onHeapCacheFactory.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(mockOnHeapCache); - - ICache ircCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); - assertEquals(mockOnHeapCache, ircCache); - } - - public void testWithCreateCacheForIndicesRequestCacheTypeWithFeatureFlagTrue() { - CachePlugin mockPlugin1 = mock(CachePlugin.class); - ICache.Factory factory1 = mock(ICache.Factory.class); - ICache.Factory onHeapCacheFactory = mock(OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.class); - Map factoryMap = Map.of( - "cache1", - factory1, - OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, - onHeapCacheFactory - ); - when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); - - Setting indicesRequestCacheSetting = CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE); - CacheService cacheService = new CacheService( - factoryMap, - Settings.builder().put(indicesRequestCacheSetting.getKey(), "cache1").put(FeatureFlags.PLUGGABLE_CACHE, "true").build() - ); - CacheConfig config = mock(CacheConfig.class); - ICache mockOnHeapCache = mock(OpenSearchOnHeapCache.class); when(factory1.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(mockOnHeapCache); + ICache otherMockOnHeapCache = mock(OpenSearchOnHeapCache.class); + when(onHeapCacheFactory.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(otherMockOnHeapCache); ICache ircCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); assertEquals(mockOnHeapCache, ircCache); } - public void testWithCreateCacheForIndicesRequestCacheTypeWithFeatureFlagTrueAndStoreNameIsNull() { + public void testWithCreateCacheForIndicesRequestCacheTypeWithStoreNameNull() { CachePlugin mockPlugin1 = mock(CachePlugin.class); ICache.Factory factory1 = mock(ICache.Factory.class); ICache.Factory onHeapCacheFactory = mock(OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.class); @@ -93,7 +68,7 @@ public void testWithCreateCacheForIndicesRequestCacheTypeWithFeatureFlagTrueAndS ); when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); - CacheService cacheService = new CacheService(factoryMap, Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build()); + 
CacheService cacheService = new CacheService(factoryMap, Settings.builder().build()); CacheConfig config = mock(CacheConfig.class); ICache mockOnHeapCache = mock(OpenSearchOnHeapCache.class); when(onHeapCacheFactory.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(mockOnHeapCache); @@ -149,6 +124,6 @@ public void testWithCreateCacheWithInvalidStoreNameAssociatedForCacheType() { IllegalArgumentException.class, () -> cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE) ); - assertEquals("No store name: [opensearch_onheap] is registered for cache type: INDICES_REQUEST_CACHE", ex.getMessage()); + assertEquals("No store name: [cache] is registered for cache type: INDICES_REQUEST_CACHE", ex.getMessage()); } } diff --git a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java index 5a989ad8ab777..e4f74d619a6a3 100644 --- a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java +++ b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java @@ -22,7 +22,6 @@ import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; @@ -40,7 +39,7 @@ public void testStats() throws Exception { MockRemovalListener listener = new MockRemovalListener<>(); int maxKeys = between(10, 50); int numEvicted = between(10, 20); - OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true, true); + OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true); // When the pluggable caches setting is on, we should get stats as expected from cache.stats(). @@ -82,49 +81,44 @@ public void testStats() throws Exception { } } - public void testStatsWithoutPluggableCaches() throws Exception { - // When the pluggable caches setting is off, or when we manually set statsTrackingEnabled = false in the config, + public void testWithoutStatsTracking() throws Exception { + // When we manually set statsTrackingEnabled = false in the config, // we should get all-zero stats from cache.stats(), but count() should still work. 
MockRemovalListener listener = new MockRemovalListener<>(); int maxKeys = between(10, 50); int numEvicted = between(10, 20); - OpenSearchOnHeapCache pluggableCachesOffCache = getCache(maxKeys, listener, false, true); - OpenSearchOnHeapCache manuallySetNoopStatsCache = getCache(maxKeys, listener, true, false); - List> caches = List.of(pluggableCachesOffCache, manuallySetNoopStatsCache); - - for (OpenSearchOnHeapCache cache : caches) { - int numAdded = maxKeys + numEvicted; - for (int i = 0; i < numAdded; i++) { - ICacheKey key = getICacheKey(UUID.randomUUID().toString()); - cache.computeIfAbsent(key, getLoadAwareCacheLoader()); + OpenSearchOnHeapCache manuallySetNoopStatsCache = getCache(maxKeys, listener, false); + int numAdded = maxKeys + numEvicted; + for (int i = 0; i < numAdded; i++) { + ICacheKey key = getICacheKey(UUID.randomUUID().toString()); + manuallySetNoopStatsCache.computeIfAbsent(key, getLoadAwareCacheLoader()); - assertEquals(Math.min(maxKeys, i + 1), cache.count()); - ImmutableCacheStatsHolder stats = cache.stats(); - assertZeroStats(cache.stats()); - } + assertEquals(Math.min(maxKeys, i + 1), manuallySetNoopStatsCache.count()); + ImmutableCacheStatsHolder stats = manuallySetNoopStatsCache.stats(); + assertZeroStats(manuallySetNoopStatsCache.stats()); } } - public void testWithCacheConfigSizeSettings_WhenPluggableCachingOff() { - // The "pluggable caching off" case can happen when the PLUGGABLE_CACHE setting is false, or if the store name is blank. - // The cache should get its size from the config, not the setting, in either case. - Settings.Builder settingsBuilder = Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, false); + public void testWithCacheConfigSizeSettings_WhenStoreNameBlank() { + // If the store name is blank, the cache should get its size from the config, not the setting. long maxSizeFromSetting = between(1000, 2000); long maxSizeFromConfig = between(3000, 4000); - OpenSearchOnHeapCache onHeapCache = setupMaxSizeTest(settingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true); - assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight()); - Settings.Builder storeNameBlankSettingsBuilder = Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, true); - onHeapCache = setupMaxSizeTest(storeNameBlankSettingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true); + Settings.Builder storeNameBlankSettingsBuilder = Settings.builder(); + OpenSearchOnHeapCache onHeapCache = setupMaxSizeTest( + storeNameBlankSettingsBuilder, + maxSizeFromSetting, + maxSizeFromConfig, + true + ); assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight()); } - public void testWithCacheConfigSettings_WhenPluggableCachingOn() { - // When pluggable caching is on, the cache should get its size from the config if present, and otherwise should get it from the + public void testWithCacheConfigSettings_WhenStoreNameNotBlank() { + // When the store name is not blank, the cache should get its size from the config if present, and otherwise should get it from the // setting. 
Settings.Builder settingsBuilder = Settings.builder() - .put(FeatureFlags.PLUGGABLE_CACHE, true) .put( CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME @@ -178,7 +172,6 @@ private void assertZeroStats(ImmutableCacheStatsHolder stats) { private OpenSearchOnHeapCache getCache( int maxSizeKeys, MockRemovalListener listener, - boolean pluggableCachesSetting, boolean statsTrackingEnabled ) { ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); @@ -189,7 +182,6 @@ private OpenSearchOnHeapCache getCache( .getKey(), maxSizeKeys * keyValueSize + "b" ) - .put(FeatureFlags.PLUGGABLE_CACHE, pluggableCachesSetting) .build(); CacheConfig cacheConfig = new CacheConfig.Builder().setKeyType(String.class) @@ -207,7 +199,7 @@ private OpenSearchOnHeapCache getCache( public void testInvalidateWithDropDimensions() throws Exception { MockRemovalListener listener = new MockRemovalListener<>(); int maxKeys = 50; - OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true, true); + OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true); List> keysAdded = new ArrayList<>(); diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java index d60bbd62bdd13..a60e9b381a8d3 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java @@ -67,7 +67,6 @@ import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.AbstractBytesReference; import org.opensearch.core.common.bytes.BytesReference; @@ -199,58 +198,6 @@ public void testBasicOperationsCache() throws Exception { assertEquals(0, cache.numRegisteredCloseListeners()); } - public void testBasicOperationsCacheWithFeatureFlag() throws Exception { - threadPool = getThreadPool(); - Settings settings = Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.PLUGGABLE_CACHE, "true").build(); - cache = getIndicesRequestCache(settings); - writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = getReader(writer, indexShard.shardId()); - - // initial cache - IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); - Loader loader = new Loader(reader, 0); - BytesReference value = cache.getOrCompute(entity, loader, reader, getTermBytes()); - assertEquals("foo", value.streamInput().readString()); - ShardRequestCache requestCacheStats = indexShard.requestCache(); - assertEquals(0, requestCacheStats.stats().getHitCount()); - assertEquals(1, requestCacheStats.stats().getMissCount()); - assertEquals(0, requestCacheStats.stats().getEvictions()); - assertFalse(loader.loadedFromCache); - assertEquals(1, cache.count()); - - // cache hit - entity = new IndicesService.IndexShardCacheEntity(indexShard); - loader = new Loader(reader, 0); - value = cache.getOrCompute(entity, loader, reader, getTermBytes()); - assertEquals("foo", value.streamInput().readString()); - requestCacheStats = indexShard.requestCache(); - assertEquals(1, requestCacheStats.stats().getHitCount()); - assertEquals(1, requestCacheStats.stats().getMissCount()); - 
assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(loader.loadedFromCache); - assertEquals(1, cache.count()); - assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length()); - assertEquals(1, cache.numRegisteredCloseListeners()); - - // Closing the cache doesn't modify an already returned CacheEntity - if (randomBoolean()) { - reader.close(); - } else { - indexShard.close("test", true, true); // closed shard but reader is still open - cache.clear(entity); - } - cache.cacheCleanupManager.cleanCache(); - assertEquals(1, requestCacheStats.stats().getHitCount()); - assertEquals(1, requestCacheStats.stats().getMissCount()); - assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(loader.loadedFromCache); - assertEquals(0, cache.count()); - assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); - - IOUtils.close(reader); - assertEquals(0, cache.numRegisteredCloseListeners()); - } - public void testCacheDifferentReaders() throws Exception { threadPool = getThreadPool(); cache = getIndicesRequestCache(Settings.EMPTY); @@ -856,8 +803,8 @@ public void testAddingToCleanupKeyToCountMapWorksAppropriatelyWithMultipleThread assertFalse(concurrentModificationExceptionDetected.get()); } - public void testCacheMaxSize_WhenPluggableCachingOff() throws Exception { - // If pluggable caching is off, the IRC should put a max size value into the cache config that it uses to create its cache. + public void testCacheMaxSize_WhenStoreNameAbsent() throws Exception { + // If a store name is absent, the IRC should put a max size value into the cache config that it uses to create its cache. threadPool = getThreadPool(); long cacheSize = 1000; Settings settings = Settings.builder().put(INDICES_CACHE_QUERY_SIZE.getKey(), cacheSize + "b").build(); @@ -871,12 +818,11 @@ public void testCacheMaxSize_WhenPluggableCachingOff() throws Exception { allowDeprecationWarning(); } - public void testCacheMaxSize_WhenPluggableCachingOn() throws Exception { - // If pluggable caching is on, and a store name is present, the IRC should NOT put a max size value into the cache config. + public void testCacheMaxSize_WhenStoreNamePresent() throws Exception { + // If and a store name is present, the IRC should NOT put a max size value into the cache config. 
threadPool = getThreadPool(); Settings settings = Settings.builder() .put(INDICES_CACHE_QUERY_SIZE.getKey(), 1000 + "b") - .put(FeatureFlags.PLUGGABLE_CACHE, true) .put( CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME @@ -953,10 +899,7 @@ public void testClosingIndexWipesStats() throws Exception { } threadPool = getThreadPool(); - Settings settings = Settings.builder() - .put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.001%") - .put(FeatureFlags.PLUGGABLE_CACHE, true) - .build(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.001%").build(); try (NodeEnvironment env = newNodeEnvironment(settings)) { cache = new IndicesRequestCache(settings, (shardId -> { IndexService indexService = null; From 659e32d8331dc8300170e8b193ca92ca9706d376 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 7 Mar 2025 20:14:09 -0800 Subject: [PATCH 062/550] Add bwc version 2.19.2 (#17484) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Craig Perkins Signed-off-by: Andrew Ross Co-authored-by: opensearch-ci-bot <83309141+opensearch-ci-bot@users.noreply.github.com> --- .ci/bwcVersions | 1 + README.md | 2 +- libs/core/src/main/java/org/opensearch/Version.java | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 8ddc5e5811d7a..2167875966fb2 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -44,4 +44,5 @@ BWC_VERSION: - "2.18.1" - "2.19.0" - "2.19.1" + - "2.19.2" - "2.20.0" diff --git a/README.md b/README.md index bf9fef148df3e..77b9e09b9e1a7 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ [![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") [![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) [![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) -[![2.19.1 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.19.1)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.19.1") +[![2.19.2 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.19.2)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.19.2") [![2.18.1 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.18.1)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.18.1") [![3.0.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 
5adde08a40d54..35bd1f2051b04 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -115,6 +115,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_2_19_1 = new Version(2190199, org.apache.lucene.util.Version.LUCENE_9_12_1); + public static final Version V_2_19_2 = new Version(2190299, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_2_20_0 = new Version(2200099, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_10_1_0); public static final Version CURRENT = V_3_0_0; From 9bef705d321d83b37f92292662c80a5e0cb4ba66 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Sat, 8 Mar 2025 19:41:25 -0500 Subject: [PATCH 063/550] Update Gradle to 8.13 (#17345) Signed-off-by: Andriy Redko --- build.gradle | 2 +- gradle/code-coverage.gradle | 2 +- gradle/wrapper/gradle-wrapper.jar | Bin 43583 -> 43705 bytes gradle/wrapper/gradle-wrapper.properties | 4 ++-- gradlew | 5 ++--- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/build.gradle b/build.gradle index 679f7b9299248..187574da9e62a 100644 --- a/build.gradle +++ b/build.gradle @@ -721,7 +721,7 @@ subprojects { reporting { reports { testAggregateTestReport(AggregateTestReport) { - testType = TestSuiteType.UNIT_TEST + testSuiteName = "test" } } } diff --git a/gradle/code-coverage.gradle b/gradle/code-coverage.gradle index eb27dd1a76634..1e41f12e1cc48 100644 --- a/gradle/code-coverage.gradle +++ b/gradle/code-coverage.gradle @@ -38,7 +38,7 @@ if (System.getProperty("tests.coverage")) { reporting { reports { testCodeCoverageReport(JacocoCoverageReport) { - testType = TestSuiteType.UNIT_TEST + testSuiteName = "test" } } } diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index a4b76b9530d66f5e68d973ea569d8e19de379189..9bbc975c742b298b441bfb90dbc124400a3751b9 100644 GIT binary patch delta 34744 zcmXuJV_+R@)3u$(Y~1X)v28cDZQE*`9qyPrXx!Mg8{4+s*nWFo&-eX5|IMs5>pW(< z=OJ4cAZzeZfy=9lI!r-0aXh8xKdlGq)X)o#ON+mC6t7t0WtgR!HN%?__cvdWdtQC< zrFQ;?l@%CxY55`8y(t7?1P_O7(6pv~(~l!kHB;z2evtUsGHzEDL+y4*no%g#AsI~i zJ%SFMv{j__Yaxnn2NtDK+!1XZX`CB}DGMIT{#8(iAk*`?VagyHx&|p8npkmz=-n!f z3D+^yIjP`D&Lfz500rpq#dJE`vM|-N7=`uN0z86BpiMcCOCS^;6CUG4o1I)W{q6Gv z1vZB6+|7An``GNoG7D!xJGJd_Qv(M-kdVdsIJ?CrXFEH^@Ts83}QX}1%P6KQFNz^-=) z<|qo#qmR!Nonr$p*Uu1Jo2c~KLTrvc*Yw%L+`IL}y|kd+t{NCrXaP=7C00CO?=pgp z!fyr#XFfFXO6z2TP5P1W{H_`$PKzUiGtJd!U52%yAJf}~tgXF`1#}@y`cZl9y{J-A zyUA&-X)+^N?W=2Fm_ce2w$C6>YWp7MgXa{7=kwwy9guBx26=MnPpuSt zB4}vo3{qxa+*{^oHxe7;JMNMp>F`iNv>0!MsFtnb+5eEZ$WI z0M9}rA&cgQ^Q8t_ojofiHaKuhvIB{B9I}3`Dsy3vW8ibigX}Kc912|UZ1uhH?RuHU=i&ePe2w%65)nBkHr7Bx5WwMZj%1B53sUEj0bxI( zEbS%WOUw)3-B0`-m0!{mk7Q%={B#7C^Si>C04@P|qm7$Oxn3ki)G_oNQBTh6CN6d_kt@UKx1Ezdo5)J0Gdf@TcW|{ zdz1V?a>zldA7_5*Pjn6kDj|sbUqt-7X z5+oajeC}*6oi~vxZ#Ac&85cYcC$5OKUnYPv$Y~>H@)mnTtALo*>>5&=0QMr5{5?S; zCDF=RI@94n(!~sa`4Y{JLxgcvRqMM&T!}rRd~Kl#_X4Z&85;})o4W*g>?TaAVXSWB zeY#!8qz^hmC6FERsjTnC)1Xu1UPd7_LfuNvuVqF8(}Jfar=T-K9iChEuZi-FH(P%u zzLrjpq|?}8?g1Vnw^&{eqw~QY0f*9c71&*<5#9f5JlhJmG~IuV*8~nEBLr`KrvOvs zkOLdlZ58K?u>1{vAU0CtT>Il<I{Q8#A!lO7#73V&iN13;oV?Hl?N5xDK63)Rp3%5reb&3n5OQ|9H zDpYEI%JQXcrs^o*SCFY~iYf-VM<`7Tl@+kQS3tfR-fyH_JDaz5SYEMU-bTCLQ=JVG 
ze?ZPcj95Tci|bVvSZk3^enqQ?pIcZn24V=YT{cf-L|P&{-%%^ql$)^Vu~)Ida=h$bZAMQEi$MM|&b zY8;D;aEba_`W^=VdKfttW)h_zjRA&0A^T*tF*%+}TZQCOvFqKUu=xf1Bx@T?&~S(J zopXniA?s%}Q4p9~F(Ty{8wt$l4oHeT(#U6sAu4>Q+~a;}I>0>??v*wfke}0TwPaeE zj3gWtfNlD{jRgy7;S9PS?su5pnobi%Zoe0LVpw%`<)V=yT~Ht_UUXIna4YUa;p=-T4df6^;bz%;@|$F zK;s9#K@9hqZCST!66N0uPB+FT*kq22%ovtJ%<9ArE%hcX^!(Lz;3?kCZ@Ak*MThjTOKU&t+uJdN*6t$;DDmh zFStdHO>r)8L@qO}K@H~7Z);#f6WU{@Icn7Tc^|IZ`;K^ek9eCWdync`kWCt2s%D-k zE$wyPCui$@gJJ9Q`CtixbMF(GiCCbm`ut(~ce-G|Ji|PZ3~DHlG`Asn;skVhnu0r_ zgGbdmfl|er`87x@uYmd8A+!-3V95GE4&_^9N@hp4SC4 zeFU+Z3Ou&G! zlvZy|iHIIX3X2-Yb7YJ#{SYE9lCoixO+}(|u+H@Z6Rz-l1eZ7{I;vk+Y7kP7ev>hG zv|(I<4?N{EXMSvRgUhbQhDoP1&A;SEUGGep8*!@4u)fNbl3%cts<&=m5<5pi7M-HQ zPS#svbXWu2n&m*K6jL#@xm3VSMJxnxve5J6w1qGv`2>5<6F!uzGVHP1A(_xI7CWlX zm6*wpT@dmQ&pAlm`r~T;)>m5HK^H^cM`pCSoh{;-CE43rMkg<;HnZaCHfMq1LoN0S z%%7|$y~&k6wpiY@rsdCY9ZDh%9W6Pf=2^p=;iv-Ah^ACxwK3VmI}SMNneTa9n%biL z#GoojRHxa}R2zOo!G@<8M-B6vNp?)@_>#mYku#pe{O~t?~}1 zE8`)=BstIRk5W*xZw@2=89@ds?eQ~mxzkrA`y<$oR8bmaUw=rE%lFmzHY&aY8?<-N zp1|bb$(XrOMmiYy{pH#)D1GOmv5aj_?waU~*h~s{VZ&H_PhoXYz`C8Pss{ymY_hPG zt{NY&nPMH#FRvwR+T0(Xo2#T6;=oFmRgA9b-HVY72d|~YF+6v$F%sY0 zS#^LF7sTj>Itvyi!~){Hit*~3imOG*Xh51qLz+!W~`vUBVeZZ5&k34SD%Ha%5#aclSzMfoGWjiq9#rl}j zOf*8NY>VN(`W!DxaBgjBzj3oUAVlLY{R}tiZZ0o>K$vwr?+eggZ!q74m2t?lkvm9z zAmL2=W$jQJL>SSrbIOibe734A(K^B8`M@uao!`E$p+9D!rBea8Oxb|p5r3o4##G8K zMr0I9y&`21{@m=Bi+4tTJ-xy(DB_mG$kYv+qw&VBM(A9^wP9;Yo*6{#5tMpfa;m2FC+%l@ zk_cKXg-d&YUIj3(x{)aNwYGYjSHiOQK2K#yWt$vQomhbnF;Qhkxl`+;i{&+t{PrY` zp5r28&|UvmUK|&Jlv>oX4>XE87Zns?fiE6c;VP7BixT*6n}Zsbv$wd{gXyrE&Sd zhRlv!-{%~xv6yNvx@3^@JEa$={&giRpqZG>`{93 zEjM}YI1i6JSx$DJa&NWcl0M;igxX;est*nz=W16zMfJ0#+s{>Eo>bxmCi)m*43hU1 z;FL43I}nWszjSS%*F1UYt^)4?D6&pDEt1(atK(DKY1pAkNMG`a>_ec;KiT z^xMBBZ9i=;!_hNGlYp^uR0FW^lcBrs_c3ZvhcctW4*T^-DD^OU{{hK8yHahyGyCK& zL0>f0XW|wvi4f`bNTfO+P*Ao^L@8~ezagtl%l z{(2uo71sT3rKTQ-L#Y5Rsy#x)Eo+HQranZmk;r_Hf7WWkRq&QmP{?}do0X=;3U_UYspffJl7v*Y&GnW;M7$C-5ZlL*MU|q*6`Lvx$g^ z6>MRgOZ>~=OyR3>WL0pgh2_ znG)RNd_;ufNwgQ9L6U@`!5=xjzpK_UfYftHOJ)|hrycrpgn-sCKdQ{BY&OEV3`roT|=4I#PT@q`6Lx=Lem2M&k4ghOSjXPH5<%cDd>`!rE} z5;hyRQ|6o>*}@SFEzb7b%5iY}9vOMRGpIQqt%%m)iSpQ@iSAU+A{CmB^&-04fQlV9 z14~oE=?j{b{xE*X^1H)eezKTE27;-=UfNvQZ0kZ+m76{6xqAyTrEB&Oe`Mx{4N;}5 zXp%ojp}JYx6PE}Z`IBO3qWsZEfVPa4EEz0vnsFNkQ!kG8tcec&)k$+s&XmPErROoNxeTh9fATBk)w1g|9*~&S!%r0u6+FTn}dK-qa7cfK~tkJlV zMi{BX!>lQsZhSQUWAf(M6+McPrv>)j<*T&hC!*?qq{@ABJWX z@!~2Y1rhy*Z|x`DZUBuyayz}Kv5Pzrh}1wiHT{9|fh`Wl%ao=lRSwEFl*wy6BZ%vo zrt9Ocbicd1q$a{F6`4#ZQ6vJa@`}IGz+xUr*=6TF^GR?`u{1to&gqJpwf$LN0?G&! zsLNiG+}M+c{*j-Q4I zO!=lj&~{29Os}hgEv`iJ1tU)dx}=ob>DHSHKX|FVu2Y#pO|SsigHRgg4?!FX2>b3W z`m}xI<#_02adGka0TuAIg89kS?>*lKyI)T)Pa)|12XfH;k9}#=dzH6TiciCNO->e9m>!W)l&4B zd74@>_LL9OuJ&v5e0)l7ME@xW)9K@*LUd1RY}Vs_${3YC%+LfSR^H+I=(7Szh2nKB z_8bMoty|M+k9A|hGURVePvMf0XY9NYOiC@h^MLs-X@(8PV4zI7A155!RnZrBE9R1> zuI4E`=JTxyJ#d`!(9_s?T2jxEM*E`){wGI`DBFIz%ouW`Y0cKDfXAGN{};aMpLRvZ zu`PZ-3(+Tsh?UKAr)TQQ;2Jz(kv8{R#!c9Tyeev55@5@Ng*c4-ZQ6vC?o#5>6{;?gVfAIr-+^g>3b$}13U^~?gce6s6k-4ulnzWlFpq}*)2 zd0!wP{2>3U+zYiPaNr+-6O`J;M2Cb`H5hjDXw(1oKK!?dN#Y~ygl{H2|9$( zVg7`gf9*O%Db^Bm6_d808Q!r%K;IUSa(r^hW`w)~)m<)kJ(>{IbCs-LkKJ5Qk~Ujv z|5`OBU>lb7(1IAMvx%~sj+&>%6+_-Pj&OOMzMrkXW}gMmCPOw5zddR}{r9blK&1(w z^6?`m=qMI=B*p~LklFLvlX{LflRXecS#lV$LVwi$+9F8zyE29LgL> zW6R-6z&3x-zL({$nMnbhu|plRO8S_EavN?EKrr+c&Tt;Mk)NC0e|cvyXk%VKb5VIc z;|DN^5)t^}tr&-2q)SbwrF>=k$moYK;yA{Q1!I940KmPvg_Ogb81w$_)i3FgFWG+MS?k=BpkVGk-bRhBF;xJ}wnGN{)?gbry^3=P1@$k^#z9*@tmmB+TZ|L@3#3Z+x z8hJE({GEeEWj#+MnUSN^~c!=G+yW^j=cfN_0!}%(J-f1`G}w^}xi!T8BJDOCri{mGBU? 
z+dA2~9tVLR*2#}wl3kX<%G~y*mW&hYC(@b49;C3o^Z~v_7$_x*N|I|v`&i45IX|B1=4vaVd3PpNY;;~A ztC*Q@XS!v7{8;phXUsnbA-TMXmOWsCxte$qib6tBnljH_wrg(qy)J~r(YKJKiI^@L z32i1FU~UBL+>rPfVS4sWYUk4F-yrQH&d^$snQ+bh=Grrl*yp_Y6P_G42ksY7{XDy!@BpD zR7o?eFWUQz?llUyQc1AcFyYNn=wV8H2Y518w=C)>qG}Dt!QVs|`{G*hTt>yKL6|Aws-73L-7Tq6n*O^57tyDvcRy5%UYtiLUv~R9V`;&h>u37{T3v< zEBXKCudNlzz882L^h?Hd@5OHmzJA%W>qTRDqg3I?%i+B{zU6xQGfmPHm>A*ke=Wu%L&yh?jK4PyH&G0^GizJmh0C&7taf*Z*5)C+PrUhW`)J}iYwoBdLQi! zymZKrJCpl-q=9Zvghi#~YAfIYXmtHkldpVts$g2*daUr-xl%9PhOn4}vooBx z>sA*WndWYo;?1g_Qz?|5Q#tKlD@&m0iOKa%0)at}MK@K>9kr5nK3KR%deeuEts7sf z9Dg_AUd*L9mK#SdF{`(~aW#FXyi>J;`E;$gPED!!y#?=?Rxim}-+3Z4@##G+!MZhz z50xuMN%s8Om$^jdSm8%LMah3l>iHvAE_{D<+mdXX^!xL>&-kvnt+rg?s><9=mrW;J z&Qr=2>`l|(aq0Wtdz>+x-?%TZ)a{LWl(}xNs*L|lqZ_YV_D(#0Z&u%0rJSw3cc&kg zTTm!^QnsnpO-XUv+E03`riaII-*pXraqE>~$i|mBB|)aSMoyPc3anhatYF66U$rZK z@Pj%~f{}?Yf+zRPUCBB*p(;Xgvemp~mc!G9W=>u>PmIY$U~=F*naQ;RqLUx26kvti zt^R+WC=uynoD+HdCGWoQ!JlHzW4QPvi zy~J8z4dn~9WW=t+?#W_cFh)`QKm$p!HY@l>rpW?}M47_1;Syepv}BO) z$+1T4#Ch@z3~DGQ#h6Y$uviIrMFm75 z_%L*!57z*(4vNChmOzE>vXH}}85rgOPp3!q)hcU-$qx2Xliyn_gY1-rpH~bFEJqZh zgzZ5py}_#B$KL`~*`cTsa%7ln@8|(`KjI`-1_pf;RUXchA1oD}+`rUR8gbAhx`j5A z?=OvI1)s+^*>RaD(_NscOXVhOdMbiVM;w*|Je&{3bX^~yLfOd=mdVS&4_g5`R2N0j zt5C2L43-axH1|&#=Wr3=B#r3YSm5zuZm+d94eoZBHsE zKUgk1*`f-PT@V9^3=9e=25qVaDwLVLbA`MNVnm36K^{dBLpRu2{@vi5DT5dWK~EIW&pHfkaU4roNf6g>=uCr>T__Rcg`=}3c15@4P_ a%EQ2*fnt2> /dev/null && printf '%s -' "$PWD" ) || exit +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum @@ -206,7 +205,7 @@ fi DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' # Collect all arguments for the java command: -# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, # and any embedded shellness will be escaped. # * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be # treated as '${Hostname}' itself on the command line. From 701f1a38e6d8f06169f1f16dc9116b360448f80f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Mar 2025 19:48:27 -0700 Subject: [PATCH 064/550] Bump org.jruby.jcodings:jcodings from 1.0.61 to 1.0.63 in /libs/grok (#17560) * Bump org.jruby.jcodings:jcodings from 1.0.61 to 1.0.63 in /libs/grok Bumps [org.jruby.jcodings:jcodings](https://github.com/jruby/jcodings) from 1.0.61 to 1.0.63. - [Commits](https://github.com/jruby/jcodings/compare/jcodings-1.0.61...jcodings-1.0.63) --- updated-dependencies: - dependency-name: org.jruby.jcodings:jcodings dependency-type: direct:production update-type: version-update:semver-patch ... 

Signed-off-by: dependabot[bot]

* Updating SHAs

Signed-off-by: dependabot[bot]

* Update changelog

Signed-off-by: dependabot[bot]

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: opensearch-trigger-bot[bot] <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
---
 CHANGELOG.md | 1 +
 libs/grok/build.gradle | 2 +-
 libs/grok/licenses/jcodings-1.0.61.jar.sha1 | 1 -
 libs/grok/licenses/jcodings-1.0.63.jar.sha1 | 1 +
 test/framework/licenses/bcutil-jdk18on-1.78.jar.sha1 | 1 +
 test/framework/licenses/byte-buddy-1.15.10.jar.sha1 | 1 +
 test/framework/licenses/commons-codec-1.16.1.jar.sha1 | 1 +
 test/framework/licenses/commons-logging-1.2.jar.sha1 | 1 +
 test/framework/licenses/hamcrest-2.1.jar.sha1 | 1 +
 test/framework/licenses/junit-4.13.2.jar.sha1 | 1 +
 test/framework/licenses/lucene-codecs-10.1.0.jar.sha1 | 1 +
 test/framework/licenses/lucene-test-framework-10.1.0.jar.sha1 | 1 +
 test/framework/licenses/mockito-core-5.14.2.jar.sha1 | 1 +
 test/framework/licenses/objenesis-3.3.jar.sha1 | 1 +
 test/framework/licenses/randomizedtesting-runner-2.7.1.jar.sha1 | 1 +
 15 files changed, 14 insertions(+), 2 deletions(-)
 delete mode 100644 libs/grok/licenses/jcodings-1.0.61.jar.sha1
 create mode 100644 libs/grok/licenses/jcodings-1.0.63.jar.sha1
 create mode 100644 test/framework/licenses/bcutil-jdk18on-1.78.jar.sha1
 create mode 100644 test/framework/licenses/byte-buddy-1.15.10.jar.sha1
 create mode 100644 test/framework/licenses/commons-codec-1.16.1.jar.sha1
 create mode 100644 test/framework/licenses/commons-logging-1.2.jar.sha1
 create mode 100644 test/framework/licenses/hamcrest-2.1.jar.sha1
 create mode 100644 test/framework/licenses/junit-4.13.2.jar.sha1
 create mode 100644 test/framework/licenses/lucene-codecs-10.1.0.jar.sha1
 create mode 100644 test/framework/licenses/lucene-test-framework-10.1.0.jar.sha1
 create mode 100644 test/framework/licenses/mockito-core-5.14.2.jar.sha1
 create mode 100644 test/framework/licenses/objenesis-3.3.jar.sha1
 create mode 100644 test/framework/licenses/randomizedtesting-runner-2.7.1.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f40100aa2650e..e3c69549bd62f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,6 +27,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378))
 - Bump `com.netflix.nebula.ospackage-base` from 11.10.1 to 11.11.1 ([#17374](https://github.com/opensearch-project/OpenSearch/pull/17374))
 - Bump `software.amazon.awssdk` from 2.20.86 to 2.30.31 ([17396](https://github.com/opensearch-project/OpenSearch/pull/17396))
+- Bump `org.jruby.jcodings:jcodings` from 1.0.61 to 1.0.63 ([#17560](https://github.com/opensearch-project/OpenSearch/pull/17560))

 ### Changed
 - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233)
diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle
index 9ea59bc2a77b8..3bcbb59aece56 100644
--- a/libs/grok/build.gradle
+++ b/libs/grok/build.gradle
@@ -31,7 +31,7 @@ dependencies {
   api 'org.jruby.joni:joni:2.2.3'
   // joni dependencies:
-  api 'org.jruby.jcodings:jcodings:1.0.61'
+  api 'org.jruby.jcodings:jcodings:1.0.63'

   testImplementation(project(":test:framework")) {
     exclude group: 'org.opensearch', module: 'opensearch-grok'
diff --git a/libs/grok/licenses/jcodings-1.0.61.jar.sha1 b/libs/grok/licenses/jcodings-1.0.61.jar.sha1
deleted file mode 100644
index df675e0f40640..0000000000000
--- a/libs/grok/licenses/jcodings-1.0.61.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-51b947691d501d0b6b3c04955d82e934e190b7ad
\ No newline at end of file
diff --git a/libs/grok/licenses/jcodings-1.0.63.jar.sha1 b/libs/grok/licenses/jcodings-1.0.63.jar.sha1
new file mode 100644
index 0000000000000..244c6685e0b52
--- /dev/null
+++ b/libs/grok/licenses/jcodings-1.0.63.jar.sha1
@@ -0,0 +1 @@
+b8c386790e72f0bad666c8f1a8af12d0528992e0
\ No newline at end of file
diff --git a/test/framework/licenses/bcutil-jdk18on-1.78.jar.sha1 b/test/framework/licenses/bcutil-jdk18on-1.78.jar.sha1
new file mode 100644
index 0000000000000..9c88eef3ace17
--- /dev/null
+++ b/test/framework/licenses/bcutil-jdk18on-1.78.jar.sha1
@@ -0,0 +1 @@
+81c1f5e06f206be5dad137d563609dbe66c81d31
\ No newline at end of file
diff --git a/test/framework/licenses/byte-buddy-1.15.10.jar.sha1 b/test/framework/licenses/byte-buddy-1.15.10.jar.sha1
new file mode 100644
index 0000000000000..b89163a2aa842
--- /dev/null
+++ b/test/framework/licenses/byte-buddy-1.15.10.jar.sha1
@@ -0,0 +1 @@
+635c873fadd853c084f84fdc3cbd58c5dd8537f9
\ No newline at end of file
diff --git a/test/framework/licenses/commons-codec-1.16.1.jar.sha1 b/test/framework/licenses/commons-codec-1.16.1.jar.sha1
new file mode 100644
index 0000000000000..6b8803089c6d7
--- /dev/null
+++ b/test/framework/licenses/commons-codec-1.16.1.jar.sha1
@@ -0,0 +1 @@
+47bd4d333fba53406f6c6c51884ddbca435c8862
\ No newline at end of file
diff --git a/test/framework/licenses/commons-logging-1.2.jar.sha1 b/test/framework/licenses/commons-logging-1.2.jar.sha1
new file mode 100644
index 0000000000000..f40f0242448e8
--- /dev/null
+++ b/test/framework/licenses/commons-logging-1.2.jar.sha1
@@ -0,0 +1 @@
+4bfc12adfe4842bf07b657f0369c4cb522955686
\ No newline at end of file
diff --git a/test/framework/licenses/hamcrest-2.1.jar.sha1 b/test/framework/licenses/hamcrest-2.1.jar.sha1
new file mode 100644
index 0000000000000..b3084acb6e26c
--- /dev/null
+++ b/test/framework/licenses/hamcrest-2.1.jar.sha1
@@ -0,0 +1 @@
+9420ba32c29217b54eebd26ff7f9234d31c3fbb2
\ No newline at end of file
diff --git a/test/framework/licenses/junit-4.13.2.jar.sha1 b/test/framework/licenses/junit-4.13.2.jar.sha1
new file mode 100644
index 0000000000000..7d065692bff1a
--- /dev/null
+++ b/test/framework/licenses/junit-4.13.2.jar.sha1
@@ -0,0 +1 @@
+8ac9e16d933b6fb43bc7f576336b8f4d7eb5ba12
\ No newline at end of file
diff --git a/test/framework/licenses/lucene-codecs-10.1.0.jar.sha1 b/test/framework/licenses/lucene-codecs-10.1.0.jar.sha1
new file mode 100644
index 0000000000000..ab61c727ad3e7
--- /dev/null
+++ b/test/framework/licenses/lucene-codecs-10.1.0.jar.sha1
@@ -0,0 +1 @@
+c8c5a67a9718b24fc2e0fe958db3404254af628e
\ No newline at end of file
diff --git a/test/framework/licenses/lucene-test-framework-10.1.0.jar.sha1 b/test/framework/licenses/lucene-test-framework-10.1.0.jar.sha1
new file mode 100644
index 0000000000000..29df9ec4257d2
--- /dev/null
+++ b/test/framework/licenses/lucene-test-framework-10.1.0.jar.sha1
@@ -0,0 +1 @@
+1985bbbb74c358d2e2477eb467b867fc5a8d0e25
\ No newline at end of file
diff --git a/test/framework/licenses/mockito-core-5.14.2.jar.sha1 b/test/framework/licenses/mockito-core-5.14.2.jar.sha1
new file mode 100644
index 0000000000000..a9fe959400ceb
--- /dev/null
+++ b/test/framework/licenses/mockito-core-5.14.2.jar.sha1
@@ -0,0 +1 @@
+f7bf936008d7664e2002c3faf0c02071c8d10e7c
\ No newline at end of file
diff --git a/test/framework/licenses/objenesis-3.3.jar.sha1 b/test/framework/licenses/objenesis-3.3.jar.sha1
new file mode 100644
index 0000000000000..5af6ca9301a38
--- /dev/null
+++ b/test/framework/licenses/objenesis-3.3.jar.sha1
@@ -0,0 +1 @@
+1049c09f1de4331e8193e579448d0916d75b7631
\ No newline at end of file
diff --git a/test/framework/licenses/randomizedtesting-runner-2.7.1.jar.sha1 b/test/framework/licenses/randomizedtesting-runner-2.7.1.jar.sha1
new file mode 100644
index 0000000000000..ff3471361e7e5
--- /dev/null
+++ b/test/framework/licenses/randomizedtesting-runner-2.7.1.jar.sha1
@@ -0,0 +1 @@
+e917f4983144c3b969eb7d3648338ecde5e3ba89
\ No newline at end of file

From 2ee86601c9f674862cabf85c7c17aafc4732ffd8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 10 Mar 2025 23:36:40 -0700
Subject: [PATCH 065/550] Bump com.azure:azure-storage-blob from 12.28.1 to 12.29.1 in /plugins/repository-azure (#17562)

* Bump com.azure:azure-storage-blob in /plugins/repository-azure

Bumps [com.azure:azure-storage-blob](https://github.com/Azure/azure-sdk-for-java) from 12.28.1 to 12.29.1.
- [Release notes](https://github.com/Azure/azure-sdk-for-java/releases)
- [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-storage-blob_12.28.1...azure-storage-blob_12.29.1)

---
updated-dependencies:
- dependency-name: com.azure:azure-storage-blob
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]

* Updating SHAs

Signed-off-by: dependabot[bot]

* Update changelog

Signed-off-by: dependabot[bot]

* Change exception type for null credential error

Signed-off-by: Daniel Widdis

---------

Signed-off-by: dependabot[bot]
Signed-off-by: Daniel Widdis
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Daniel Widdis
---
 CHANGELOG.md | 1 +
 plugins/repository-azure/build.gradle | 2 +-
 .../licenses/azure-storage-blob-12.28.1.jar.sha1 | 1 -
 .../licenses/azure-storage-blob-12.29.1.jar.sha1 | 1 +
 .../repositories/azure/AzureStorageServiceTests.java | 10 +++++-----
 5 files changed, 8 insertions(+), 7 deletions(-)
 delete mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.28.1.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.29.1.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e3c69549bd62f..db39772ceef87 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -28,6 +28,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.netflix.nebula.ospackage-base` from 11.10.1 to 11.11.1 ([#17374](https://github.com/opensearch-project/OpenSearch/pull/17374))
 - Bump `software.amazon.awssdk` from 2.20.86 to 2.30.31 ([17396](https://github.com/opensearch-project/OpenSearch/pull/17396))
 - Bump `org.jruby.jcodings:jcodings` from 1.0.61 to 1.0.63 ([#17560](https://github.com/opensearch-project/OpenSearch/pull/17560))
+- Bump `com.azure:azure-storage-blob` from 12.28.1 to 12.29.1 ([#17562](https://github.com/opensearch-project/OpenSearch/pull/17562))

 ### Changed
 - Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233)
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index 332651e37cfa4..f2e716d1f4a1a 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -56,7 +56,7 @@ dependencies {
"io.netty:netty-resolver-dns:${versions.netty}" api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') - api 'com.azure:azure-storage-blob:12.28.1' + api 'com.azure:azure-storage-blob:12.29.1' api 'com.azure:azure-identity:1.14.2' // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.28.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.28.1.jar.sha1 deleted file mode 100644 index 95ac42063d36f..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-blob-12.28.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f1eef206d0a71e4ef6c5e79578dd1b9c0370869 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.29.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.29.1.jar.sha1 new file mode 100644 index 0000000000000..af0da6064372e --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-blob-12.29.1.jar.sha1 @@ -0,0 +1 @@ +bf6845feeee7e47da636afcfa28f3affbf1fede5 \ No newline at end of file diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java index 324a20c9030c6..3bb99d5051144 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java @@ -33,9 +33,9 @@ package org.opensearch.repositories.azure; import com.azure.core.http.policy.HttpPipelinePolicy; -import com.azure.identity.CredentialUnavailableException; import com.azure.storage.blob.BlobServiceClient; import com.azure.storage.common.policy.RequestRetryPolicy; +import com.microsoft.aad.msal4j.MsalServiceException; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; @@ -61,6 +61,7 @@ import reactor.netty.http.HttpResources; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -202,10 +203,9 @@ public void testClientUsingManagedIdentity() throws IOException { final BlobServiceClient client1 = azureStorageService.client("azure1").v1(); // Expect the client to use managed identity for authentication, and it should fail because managed identity environment is - // not - // setup in the test - final CredentialUnavailableException e = expectThrows(CredentialUnavailableException.class, () -> client1.getAccountInfo()); - assertThat(e.getMessage(), is("Managed Identity authentication is not available.")); + // not setup in the test + final MsalServiceException e = expectThrows(MsalServiceException.class, () -> client1.getAccountInfo()); + assertThat(e.getMessage(), containsString("HttpStatusCode: 401")); } } } From 7094de1c49d0432f616f9d459a86912780f971ee Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Tue, 11 Mar 2025 12:20:18 -0400 Subject: [PATCH 066/550] Add json-smart to version catalog (#17569) Signed-off-by: Daniel Widdis --- gradle/libs.versions.toml | 1 + plugins/repository-azure/build.gradle | 6 +++--- 
.../licenses/accessors-smart-2.5.1.jar.sha1 | 1 - .../licenses/accessors-smart-2.5.2.jar.sha1 | 1 + plugins/repository-azure/licenses/json-smart-2.5.1.jar.sha1 | 1 - plugins/repository-azure/licenses/json-smart-2.5.2.jar.sha1 | 1 + plugins/repository-hdfs/build.gradle | 2 +- test/fixtures/hdfs-fixture/build.gradle | 2 +- 8 files changed, 8 insertions(+), 7 deletions(-) delete mode 100644 plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/accessors-smart-2.5.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/json-smart-2.5.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/json-smart-2.5.2.jar.sha1 diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 8d8c49e531e77..b423523bb9e3f 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -28,6 +28,7 @@ google_auth = "1.29.0" tdigest = "3.3" hdrhistogram = "2.2.2" grpc = "1.68.2" +json_smart = "2.5.2" # when updating the JNA version, also update the version in buildSrc/build.gradle jna = "5.13.0" diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index f2e716d1f4a1a..995ff49a355cf 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -67,9 +67,9 @@ dependencies { api 'com.nimbusds:content-type:2.3' api 'com.nimbusds:lang-tag:1.7' // Both msal4j:1.14.3 and oauth2-oidc-sdk:11.9.1 has compile dependency on different versions of json-smart, - // selected the higher version which is 2.5.0 - api 'net.minidev:json-smart:2.5.1' - api 'net.minidev:accessors-smart:2.5.1' + // selected the higher version + api "net.minidev:json-smart:${versions.json_smart}" + api "net.minidev:accessors-smart:${versions.json_smart}" api "org.ow2.asm:asm:${versions.asm}" // End of transitive dependencies for azure-identity api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" diff --git a/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 b/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 deleted file mode 100644 index 8f7452437323d..0000000000000 --- a/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19b820261eb2e7de7d5bde11d1c06e4501dd7e5f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/accessors-smart-2.5.2.jar.sha1 b/plugins/repository-azure/licenses/accessors-smart-2.5.2.jar.sha1 new file mode 100644 index 0000000000000..6012c3d0379f6 --- /dev/null +++ b/plugins/repository-azure/licenses/accessors-smart-2.5.2.jar.sha1 @@ -0,0 +1 @@ +ce16fd235cfee48e67eda33e684423bba09f7d07 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/json-smart-2.5.1.jar.sha1 b/plugins/repository-azure/licenses/json-smart-2.5.1.jar.sha1 deleted file mode 100644 index fe23968afce1e..0000000000000 --- a/plugins/repository-azure/licenses/json-smart-2.5.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c11d2808d009132dfbbf947ebf37de6bf266c8e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/json-smart-2.5.2.jar.sha1 b/plugins/repository-azure/licenses/json-smart-2.5.2.jar.sha1 new file mode 100644 index 0000000000000..97fc7b94f0fd2 --- /dev/null +++ b/plugins/repository-azure/licenses/json-smart-2.5.2.jar.sha1 @@ -0,0 +1 @@ +95d166b18f95907be0f46cdb9e1c0695eed03387 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 4cfb572929f54..d3c92ac39f5b4 100644 --- 
a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -81,7 +81,7 @@ dependencies { api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" - api 'net.minidev:json-smart:2.5.2' + api "net.minidev:json-smart:${versions.json_smart}" api "io.netty:netty-all:${versions.netty}" implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" implementation 'org.codehaus.woodstox:stax2-api:4.2.2' diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index d69ddcbd1a07c..ae9dad9a8bc56 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -65,7 +65,7 @@ dependencies { api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - api 'net.minidev:json-smart:2.5.2' + api "net.minidev:json-smart:${versions.json_smart}" api "org.mockito:mockito-core:${versions.mockito}" api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" From e80b9077c06d262d3b3877b041087c30d04abfb8 Mon Sep 17 00:00:00 2001 From: Asim M Date: Tue, 11 Mar 2025 16:39:32 -0700 Subject: [PATCH 067/550] Update to use RandomAccessInput.readBytes bulk method (#17555) Signed-off-by: Asim Mahmood --- .../org/opensearch/common/util/ByteArrayBackedBitset.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java b/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java index a0c14ac8e9116..c450fdababb73 100644 --- a/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java +++ b/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java @@ -31,11 +31,7 @@ public ByteArrayBackedBitset(int capacity) { */ public ByteArrayBackedBitset(RandomAccessInput in, long offset, int length) throws IOException { byteArray = new byte[length]; - int i = 0; - while (i < length) { - byteArray[i] = in.readByte(offset + i); - i++; - } + in.readBytes(offset, byteArray, 0, length); } /** From e306d510a4d5e78edb98b3446215e5afb185d64b Mon Sep 17 00:00:00 2001 From: Andy Date: Wed, 12 Mar 2025 21:54:15 -0700 Subject: [PATCH 068/550] Add read and write optional enum sets methods to stream input and output (#17556) * Add read and write optional enum sets to stream Signed-off-by: Andy Qin * Only write set if it is non-empty Signed-off-by: Andy Qin * Update javadoc Signed-off-by: Andy Qin * Read false presence flag as empty set Co-authored-by: Daniel Widdis Signed-off-by: Andy * Update javadoc Signed-off-by: Andy Qin * Add unit tests Signed-off-by: Andy Qin * Fix typos Signed-off-by: Andy Qin * Update changelog Signed-off-by: Andy Qin --------- Signed-off-by: Andy Qin Signed-off-by: Andy Co-authored-by: Daniel Widdis --- CHANGELOG-3.0.md | 1 + .../core/common/io/stream/StreamInput.java | 15 ++++++++ .../core/common/io/stream/StreamOutput.java | 13 +++++++ .../common/io/stream/BaseStreamTests.java | 34 +++++++++++++++++++ 4 files changed, 63 insertions(+) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index fabf6645fb6eb..5a3b2ce3a273e 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -51,6 +51,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - 
Use Lucene `BM25Similarity` as default since the `LegacyBM25Similarity` is marked as deprecated ([#17306](https://github.com/opensearch-project/OpenSearch/pull/17306)) - Wildcard field index only 3gram of the input data [#17349](https://github.com/opensearch-project/OpenSearch/pull/17349) - Use BC libraries to parse PEM files, increase key length, allow general use of known cryptographic binary extensions, remove unused BC dependencies ([#3420](https://github.com/opensearch-project/OpenSearch/pull/14912)) +- Add optional enum set read / write functionality to stream input / output ([#17556](https://github.com/opensearch-project/OpenSearch/pull/17556)) ### Deprecated diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java index f4c52cb8a6506..cdb52d78ee1fd 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java @@ -1305,6 +1305,21 @@ public > EnumSet readEnumSet(Class enumClass) throws IOE return res; } + /** + * Reads an optional enum set with type E that was serialized based on the value of each enum's ordinal + * The set is expected to have been written using {@link StreamOutput#writeOptionalEnumSet(EnumSet)} + * + * @return the enum set of strings + * @throws IOException if an I/O exception occurs reading the set + */ + public > EnumSet readOptionalEnumSet(Class enumClass) throws IOException { + if (readBoolean()) { + return readEnumSet(enumClass); + } else { + return EnumSet.noneOf(enumClass); + } + } + public static StreamInput wrap(byte[] bytes) { return wrap(bytes, 0, bytes.length); } diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java index cac8ddc8f94e3..6498b618b28c3 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java @@ -1255,6 +1255,19 @@ public > void writeEnumSet(EnumSet enumSet) throws IOExcept } } + /** + * Writes an optional EnumSet with type E that serializes each enum based on its ordinal value + * For null or empty enum set, writes false; + */ + public > void writeOptionalEnumSet(@Nullable EnumSet enumSet) throws IOException { + if (enumSet != null && enumSet.size() > 0) { + writeBoolean(true); + writeEnumSet(enumSet); + } else { + writeBoolean(false); + } + } + /** * Write a {@link TimeValue} to the stream */ diff --git a/libs/core/src/test/java/org/opensearch/core/common/io/stream/BaseStreamTests.java b/libs/core/src/test/java/org/opensearch/core/common/io/stream/BaseStreamTests.java index 646acefc09c48..bcffcd1a018dd 100644 --- a/libs/core/src/test/java/org/opensearch/core/common/io/stream/BaseStreamTests.java +++ b/libs/core/src/test/java/org/opensearch/core/common/io/stream/BaseStreamTests.java @@ -53,6 +53,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; @@ -354,6 +355,32 @@ private void runWriteReadCollectionTest( } } + public void testOptionalEnumSet() throws IOException { + EnumSet enumSet = EnumSet.allOf(TestEnum.class); + BytesStreamOutput out = new BytesStreamOutput(); + out.writeOptionalEnumSet(enumSet); + EnumSet targetSet = 
getStreamInput(out.bytes()).readOptionalEnumSet(TestEnum.class); + assertEquals(enumSet, targetSet); + + enumSet = EnumSet.of(TestEnum.A, TestEnum.C, TestEnum.E); + out = new BytesStreamOutput(); + out.writeOptionalEnumSet(enumSet); + targetSet = getStreamInput(out.bytes()).readOptionalEnumSet(TestEnum.class); + assertEquals(enumSet, targetSet); + + enumSet = EnumSet.noneOf(TestEnum.class); + out = new BytesStreamOutput(); + out.writeOptionalEnumSet(enumSet); + targetSet = getStreamInput(out.bytes()).readOptionalEnumSet(TestEnum.class); + assertEquals(enumSet, targetSet); + + enumSet = null; + out = new BytesStreamOutput(); + out.writeOptionalEnumSet(enumSet); + targetSet = getStreamInput(out.bytes()).readOptionalEnumSet(TestEnum.class); + assertEquals(EnumSet.noneOf(TestEnum.class), targetSet); + } + public void testSetOfLongs() throws IOException { final int size = randomIntBetween(0, 6); final Set sourceSet = new HashSet<>(size); @@ -540,4 +567,11 @@ private void assertGenericRoundtrip(Object original) throws IOException { }); } + private enum TestEnum { + A, + B, + C, + D, + E; + } } From 5a4c51084ffcb33073a221d155654d0b70a621c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Konrad=20Go=C5=82uchowski?= Date: Thu, 13 Mar 2025 10:00:01 +0100 Subject: [PATCH 069/550] Fix: visit of inner query for FunctionScoreQueryBuilder (#16776) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add visitor logic to FunctionScoreQueryBuilder Signed-off-by: jdnvn * update changelog Signed-off-by: jdnvn * add assertion that inner query builder was visited Signed-off-by: Konrad Gołuchowski --------- Signed-off-by: jdnvn Signed-off-by: Konrad Gołuchowski Signed-off-by: gaobinlong Co-authored-by: jdnvn Co-authored-by: gaobinlong --- CHANGELOG.md | 1 + .../functionscore/FunctionScoreQueryBuilder.java | 10 ++++++++++ .../FunctionScoreQueryBuilderTests.java | 12 ++++++++++++ 3 files changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index db39772ceef87..0afa1d0445b9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed ### Fixed +- Fix visit of inner query for FunctionScoreQueryBuilder ([#16776](https://github.com/opensearch-project/OpenSearch/pull/16776)) - Fix case insensitive and escaped query on wildcard ([#16827](https://github.com/opensearch-project/OpenSearch/pull/16827)) - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Add highlighting for wildcard search on `match_only_text` field ([#17101](https://github.com/opensearch-project/OpenSearch/pull/17101)) diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java index b3c797f11de6d..79ff7da1a36ba 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.index.query.functionscore; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.opensearch.common.Nullable; @@ -52,6 +53,7 @@ import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; import 
org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; @@ -704,4 +706,12 @@ private static String parseFiltersAndFunctions( } return currentFieldName; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (query != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(query); + } + } } diff --git a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index 4e64a1ec03688..8cf7941941bcb 100644 --- a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -75,6 +75,7 @@ import org.hamcrest.Matcher; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -938,4 +939,15 @@ public void testMustRewrite() throws IOException { e = expectThrows(IllegalStateException.class, () -> functionQueryBuilder2.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testVisit() { + TermQueryBuilder termQueryBuilder = new TermQueryBuilder("unmapped_field", "foo"); + FunctionScoreQueryBuilder builder = new FunctionScoreQueryBuilder(termQueryBuilder); + + List visitedQueries = new ArrayList<>(); + builder.visit(createTestVisitor(visitedQueries)); + + assertEquals(2, visitedQueries.size()); + assertTrue(visitedQueries.contains(termQueryBuilder)); + } } From 05b1cf58a14e3550398724228482d064dfb12f0e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Mar 2025 17:24:53 +0800 Subject: [PATCH 070/550] Bump ch.qos.logback:logback-classic from 1.5.16 to 1.5.17 in /test/fixtures/hdfs-fixture (#17497) * Bump ch.qos.logback:logback-classic in /test/fixtures/hdfs-fixture Bumps [ch.qos.logback:logback-classic](https://github.com/qos-ch/logback) from 1.5.16 to 1.5.17. - [Release notes](https://github.com/qos-ch/logback/releases) - [Commits](https://github.com/qos-ch/logback/compare/v_1.5.16...v_1.5.17) --- updated-dependencies: - dependency-name: ch.qos.logback:logback-classic dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: gaobinlong Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: opensearch-trigger-bot[bot] <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Co-authored-by: gaobinlong --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0afa1d0445b9a..d5abd09d63d50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) - Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) - Bump `com.netflix.nebula.ospackage-base` from 11.10.1 to 11.11.1 ([#17374](https://github.com/opensearch-project/OpenSearch/pull/17374)) +- Bump `ch.qos.logback:logback-classic` from 1.5.16 to 1.5.17 ([#17497](https://github.com/opensearch-project/OpenSearch/pull/17497)) - Bump `software.amazon.awssdk` from 2.20.86 to 2.30.31 ([17396](https://github.com/opensearch-project/OpenSearch/pull/17396)) - Bump `org.jruby.jcodings:jcodings` from 1.0.61 to 1.0.63 ([#17560](https://github.com/opensearch-project/OpenSearch/pull/17560)) - Bump `com.azure:azure-storage-blob` from 12.28.1 to 12.29.1 ([#17562](https://github.com/opensearch-project/OpenSearch/pull/17562)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index ae9dad9a8bc56..6018087038ca3 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -75,7 +75,7 @@ dependencies { api "org.apache.commons:commons-text:1.13.0" api "commons-net:commons-net:3.11.1" api "ch.qos.logback:logback-core:1.5.16" - api "ch.qos.logback:logback-classic:1.5.16" + api "ch.qos.logback:logback-classic:1.5.17" api "org.jboss.xnio:xnio-nio:3.8.16.Final" api 'org.jline:jline:3.29.0' api 'org.apache.commons:commons-configuration2:2.11.0' From 127501789334d6deb19d206bf76d8475a9e27c54 Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Fri, 14 Mar 2025 00:24:58 +0530 Subject: [PATCH 071/550] Add support to run RemoteStoreIT with S3 integration (#17578) Signed-off-by: Ashish Singh --- plugins/repository-s3/build.gradle | 3 + .../repositories/s3/S3RemoteStoreIT.java | 237 ++++ .../repositories/s3/S3BlobContainer.java | 2 +- .../repositories/s3/S3ClientSettings.java | 2 +- .../s3/S3BlobStoreContainerTests.java | 4 +- .../opensearch/remotestore/RemoteStoreIT.java | 1153 +--------------- ... 
=> RemoteStoreMultipartCoreTestCase.java} | 4 +- .../RemoteFsTimestampAwareTranslog.java | 4 +- .../RemoteStoreBaseIntegTestCase.java | 43 +- .../remotestore/RemoteStoreCoreTestCase.java | 1172 +++++++++++++++++ .../MockFsMetadataSupportedBlobContainer.java | 92 ++ .../MockFsMetadataSupportedBlobStore.java | 44 + .../MockFsMetadataSupportedRepository.java | 51 + ...ckFsMetadataSupportedRepositoryPlugin.java | 38 + .../mocks/MockFsAsyncBlobContainer.java | 0 .../multipart/mocks/MockFsBlobStore.java | 0 .../multipart/mocks/MockFsRepository.java | 0 .../mocks/MockFsRepositoryPlugin.java | 0 .../test/OpenSearchIntegTestCase.java | 6 +- 19 files changed, 1673 insertions(+), 1182 deletions(-) create mode 100644 plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RemoteStoreIT.java rename server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/{RemoteStoreMultipartIT.java => RemoteStoreMultipartCoreTestCase.java} (97%) rename {server/src/internalClusterTest => test/framework/src/main}/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java (93%) create mode 100644 test/framework/src/main/java/org/opensearch/remotestore/RemoteStoreCoreTestCase.java create mode 100644 test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedBlobContainer.java create mode 100644 test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedBlobStore.java create mode 100644 test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedRepository.java create mode 100644 test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedRepositoryPlugin.java rename {server/src/internalClusterTest => test/framework/src/main}/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java (100%) rename {server/src/internalClusterTest => test/framework/src/main}/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java (100%) rename {server/src/internalClusterTest => test/framework/src/main}/java/org/opensearch/remotestore/multipart/mocks/MockFsRepository.java (100%) rename {server/src/internalClusterTest => test/framework/src/main}/java/org/opensearch/remotestore/multipart/mocks/MockFsRepositoryPlugin.java (100%) diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index de9c5420ba034..25d910052b9a0 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -141,6 +141,7 @@ test { // this is tested explicitly in separate test tasks exclude '**/RepositoryCredentialsTests.class' exclude '**/S3RepositoryThirdPartyTests.class' + exclude '**/S3RemoteStoreIT.class' } boolean useFixture = false @@ -252,6 +253,7 @@ processYamlRestTestResources { internalClusterTest { // this is tested explicitly in a separate test task exclude '**/S3RepositoryThirdPartyTests.class' + exclude '**/S3RemoteStoreIT.class' } yamlRestTest { @@ -408,6 +410,7 @@ TaskProvider s3ThirdPartyTest = tasks.register("s3ThirdPartyTest", Test) { setTestClassesDirs(internalTestSourceSet.getOutput().getClassesDirs()) setClasspath(internalTestSourceSet.getRuntimeClasspath()) include '**/S3RepositoryThirdPartyTests.class' + include '**/S3RemoteStoreIT.class' systemProperty 'test.s3.account', s3PermanentAccessKey systemProperty 'test.s3.key', s3PermanentSecretKey systemProperty 'test.s3.bucket', s3PermanentBucket diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RemoteStoreIT.java 
b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RemoteStoreIT.java new file mode 100644 index 0000000000000..e899ac685132e --- /dev/null +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RemoteStoreIT.java @@ -0,0 +1,237 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; + +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.MockSecureSettings; +import org.opensearch.common.settings.SecureSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.plugins.Plugin; +import org.opensearch.remotestore.RemoteStoreCoreTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.threadpool.ThreadPoolStats; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; +import java.util.Locale; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) +public class S3RemoteStoreIT extends RemoteStoreCoreTestCase { + + @Override + @SuppressForbidden(reason = "Need to set system property here for AWS SDK v2") + public void setUp() throws Exception { + SocketAccess.doPrivileged(() -> System.setProperty("opensearch.path.conf", "config")); + super.setUp(); + } + + @Override + @SuppressForbidden(reason = "Need to reset system property here for AWS SDK v2") + public void tearDown() throws Exception { + SocketAccess.doPrivileged(() -> System.clearProperty("opensearch.path.conf")); + clearIndices(); + waitForEmptyRemotePurgeQueue(); + super.tearDown(); + } + + private void clearIndices() throws Exception { + assertAcked(client().admin().indices().delete(new DeleteIndexRequest("*")).get()); + } + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(S3RepositoryPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).setSecureSettings(credentials()).build(); + } + + private SecureSettings credentials() { + 
assertFalse(Strings.isNullOrEmpty(System.getProperty("test.s3.account"))); + assertFalse(Strings.isNullOrEmpty(System.getProperty("test.s3.key"))); + assertFalse(Strings.isNullOrEmpty(System.getProperty("test.s3.bucket"))); + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.access_key", System.getProperty("test.s3.account")); + secureSettings.setString("s3.client.default.secret_key", System.getProperty("test.s3.key")); + return secureSettings; + } + + @Override + protected Settings remoteStoreRepoSettings() { + + String segmentRepoName = REPOSITORY_NAME; + String translogRepoName = REPOSITORY_2_NAME; + String stateRepoName = REPOSITORY_3_NAME; + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + translogRepoName + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + translogRepoName + ); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + stateRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + stateRepoName + ); + + String prefixModeVerificationSuffix = BlobStoreRepository.PREFIX_MODE_VERIFICATION_SETTING.getKey(); + + String bucket = System.getProperty("test.s3.bucket"); + String region = System.getProperty("test.s3.region", "us-west-2"); + String basePath = System.getProperty("test.s3.base", "testpath"); + String segmentBasePath = basePath + "-segments"; + String translogBasePath = basePath + "-translog"; + String stateBasePath = basePath + "-state"; + + Settings.Builder settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(segmentRepoTypeAttributeKey, S3Repository.TYPE) + .put(segmentRepoSettingsAttributeKeyPrefix + "bucket", bucket) + .put(segmentRepoSettingsAttributeKeyPrefix + "region", region) + .put(segmentRepoSettingsAttributeKeyPrefix + "base_path", segmentBasePath) + .put(segmentRepoSettingsAttributeKeyPrefix + prefixModeVerificationSuffix, prefixModeVerificationEnable) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) + .put(translogRepoTypeAttributeKey, S3Repository.TYPE) + .put(translogRepoSettingsAttributeKeyPrefix + "bucket", bucket) + .put(translogRepoSettingsAttributeKeyPrefix + "region", region) + .put(translogRepoSettingsAttributeKeyPrefix + "base_path", translogBasePath) + .put(translogRepoSettingsAttributeKeyPrefix + prefixModeVerificationSuffix, prefixModeVerificationEnable) + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, stateRepoName) + .put(stateRepoTypeAttributeKey, S3Repository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "bucket", bucket) + .put(stateRepoSettingsAttributeKeyPrefix + "region", region) + .put(stateRepoSettingsAttributeKeyPrefix + "base_path", stateBasePath) + .put(stateRepoSettingsAttributeKeyPrefix + prefixModeVerificationSuffix, prefixModeVerificationEnable); + + final String endpoint = System.getProperty("test.s3.endpoint"); + if (endpoint != null) { + settings.put(segmentRepoSettingsAttributeKeyPrefix + "endpoint", endpoint); + settings.put(translogRepoSettingsAttributeKeyPrefix + "endpoint", endpoint); + settings.put(stateRepoSettingsAttributeKeyPrefix + "endpoint", endpoint); + } + + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(RemoteStoreEnums.PathType.values())); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), randomBoolean()); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.getKey(), segmentsPathFixedPrefix ? "a" : ""); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.getKey(), translogPathFixedPrefix ? "b" : ""); + settings.put(BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING.getKey(), snapshotShardPathFixedPrefix ? "c" : ""); + + return settings.build(); + } + + @Override + @AwaitsFix(bugUrl = "assertion of cluster health timeout trips") + public void testNoMultipleWriterDuringPrimaryRelocation() throws ExecutionException, InterruptedException { + super.testNoMultipleWriterDuringPrimaryRelocation(); + } + + @Override + @AwaitsFix(bugUrl = "assertion of cluster health timeout trips") + public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionException, InterruptedException, IOException { + super.testResumeUploadAfterFailedPrimaryRelocation(); + } + + @Override + @AwaitsFix(bugUrl = "Test times out due to too many translog upload") + public void testFlushOnTooManyRemoteTranslogFiles() throws Exception { + super.testFlushOnTooManyRemoteTranslogFiles(); + } + + @Override + protected boolean addMockIndexStorePlugin() { + return false; + } + + protected BlobStoreRepository getRepository() { + return (BlobStoreRepository) internalCluster().getDataNodeInstance(RepositoriesService.class).repository(REPOSITORY_2_NAME); + } + + @Override + protected int getActualFileCount(Path ignoredSegmentRepoPath, String shardPath) throws IOException { + BlobStoreRepository repository = getRepository(); + return repository.blobStore().blobContainer(BlobPath.cleanPath().add(shardPath)).listBlobs().size(); + } + + @Override + protected void delete(Path baseRepoPath, String shardPath) throws IOException { + BlobStoreRepository repository = getRepository(); + repository.blobStore().blobContainer(repository.basePath().add(shardPath)).delete(); + } + + private void waitForEmptyRemotePurgeQueue() throws Exception { + if (internalCluster().getDataNodeNames().isEmpty()) { + return; + } + assertBusyWithFixedSleepTime(() -> { + ThreadPoolStats.Stats remotePurgeThreadPoolStats = getRemotePurgeThreadPoolStats(); + assertEquals(0, remotePurgeThreadPoolStats.getQueue()); + assertEquals(0, remotePurgeThreadPoolStats.getQueue()); + }, TimeValue.timeValueSeconds(60), TimeValue.timeValueMillis(500)); + } + + ThreadPoolStats.Stats getRemotePurgeThreadPoolStats() { + final 
ThreadPoolStats stats = internalCluster().getDataNodeInstance(ThreadPool.class).stats(); + for (ThreadPoolStats.Stats s : stats) { + if (s.getName().equals(ThreadPool.Names.REMOTE_PURGE)) { + return s; + } + } + throw new AssertionError("refresh thread pool stats not found [" + stats + "]"); + } + + @Override + protected BlobPath getSegmentBasePath() { + String basePath = System.getProperty("test.s3.base", "testpath"); + String segmentBasePath = basePath + "-segments"; + return BlobPath.cleanPath().add(segmentBasePath); + } +} diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index d5cf201b171bb..e83ca97b385f0 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -392,7 +392,7 @@ private T getFutureValue(PlainActionFuture future) throws IOException { return future.get(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw new IllegalStateException("Future got interrupted", e); + throw new IOException("Future got interrupted", e); } catch (ExecutionException e) { if (e.getCause() instanceof IOException) { throw (IOException) e.getCause(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index e44f408e6dd12..ee856a7710f75 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -63,7 +63,7 @@ /** * A container for settings used to create an S3 client. 
*/ -final class S3ClientSettings { +public final class S3ClientSettings { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(S3ClientSettings.class); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index d3725642760dc..9e931c717bdf4 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -1970,7 +1970,7 @@ public void testDeleteWithInterruptedException() throws Exception { final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - IllegalStateException e = expectThrows(IllegalStateException.class, blobContainer::delete); + IOException e = expectThrows(IOException.class, blobContainer::delete); assertEquals("Future got interrupted", e.getMessage()); assertTrue(Thread.interrupted()); // Clear interrupted state } @@ -2026,7 +2026,7 @@ public void testDeleteBlobsIgnoringIfNotExistsWithInterruptedException() throws final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); List blobNames = Arrays.asList("test1", "test2"); - IllegalStateException e = expectThrows(IllegalStateException.class, () -> blobContainer.deleteBlobsIgnoringIfNotExists(blobNames)); + IOException e = expectThrows(IOException.class, () -> blobContainer.deleteBlobsIgnoringIfNotExists(blobNames)); assertEquals("Future got interrupted", e.getMessage()); assertTrue(Thread.interrupted()); // Clear interrupted state } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index c5050274fc4e7..29deaf7e4b537 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -8,1152 +8,7 @@ package org.opensearch.remotestore; -import org.opensearch.OpenSearchException; -import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; -import org.opensearch.action.admin.indices.flush.FlushRequest; -import org.opensearch.action.admin.indices.recovery.RecoveryResponse; -import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.opensearch.action.index.IndexResponse; -import org.opensearch.action.search.SearchPhaseExecutionException; -import org.opensearch.cluster.health.ClusterHealthStatus; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.cluster.routing.RecoverySource; -import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; -import org.opensearch.common.Priority; -import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.shard.IndexShardClosedException; -import 
org.opensearch.index.translog.Translog; -import org.opensearch.index.translog.Translog.Durability; -import org.opensearch.indices.IndicesService; -import org.opensearch.indices.RemoteStoreSettings; -import org.opensearch.indices.recovery.PeerRecoveryTargetService; -import org.opensearch.indices.recovery.RecoverySettings; -import org.opensearch.indices.recovery.RecoveryState; -import org.opensearch.plugins.Plugin; -import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.snapshots.SnapshotInfo; -import org.opensearch.snapshots.SnapshotState; -import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.transport.MockTransportService; -import org.opensearch.transport.TransportService; -import org.opensearch.transport.client.Requests; -import org.hamcrest.MatcherAssert; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; -import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; -import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; -import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.index.shard.IndexShardTestCase.getTranslog; -import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.comparesEqualTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.oneOf; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { - - protected final String INDEX_NAME = "remote-store-test-idx-1"; - - @Override - protected Collection> nodePlugins() { - return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); - } - - @Override - public Settings indexSettings() { - return remoteStoreIndexSettings(0); - } - - private void testPeerRecovery(int numberOfIterations, boolean invokeFlush) throws Exception { - internalCluster().startNodes(3); - createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); - ensureYellowAndNoInitializingShards(INDEX_NAME); - ensureGreen(INDEX_NAME); - - Map indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME); - - client().admin() - .indices() - .prepareUpdateSettings(INDEX_NAME) - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) - .get(); - ensureYellowAndNoInitializingShards(INDEX_NAME); - ensureGreen(INDEX_NAME); - - refresh(INDEX_NAME); - String replicaNodeName = replicaNodeName(INDEX_NAME); - assertBusy( - () -> 
assertHitCount(client(replicaNodeName).prepareSearch(INDEX_NAME).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS)), - 30, - TimeUnit.SECONDS - ); - - RecoveryResponse recoveryResponse = client(replicaNodeName).admin().indices().prepareRecoveries().get(); - - Optional recoverySource = recoveryResponse.shardRecoveryStates() - .get(INDEX_NAME) - .stream() - .filter(rs -> rs.getRecoverySource().getType() == RecoverySource.Type.PEER) - .findFirst(); - assertFalse(recoverySource.isEmpty()); - // segments_N file is copied to new replica - assertEquals(1, recoverySource.get().getIndex().recoveredFileCount()); - - IndexResponse response = indexSingleDoc(INDEX_NAME); - assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL) + 1, response.getSeqNo()); - refresh(INDEX_NAME); - assertBusy( - () -> assertHitCount(client(replicaNodeName).prepareSearch(INDEX_NAME).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS) + 1), - 30, - TimeUnit.SECONDS - ); - } - - public void testRemoteStoreIndexCreationAndDeletionWithReferencedStore() throws InterruptedException, ExecutionException { - String dataNode = internalCluster().startNodes(1).get(0); - createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); - ensureYellowAndNoInitializingShards(INDEX_NAME); - ensureGreen(INDEX_NAME); - - IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); - - // Simulating a condition where store is already in use by increasing ref count, this helps in testing index - // deletion when refresh is in-progress. - indexShard.store().incRef(); - assertAcked(client().admin().indices().prepareDelete(INDEX_NAME)); - indexShard.store().decRef(); - } - - public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataFlush() throws Exception { - testPeerRecovery(1, true); - } - - public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogFlush() throws Exception { - testPeerRecovery(randomIntBetween(2, 5), true); - } - - public void testPeerRecoveryWithLowActivityTimeout() throws Exception { - ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( - Settings.builder() - .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20kb") - .put(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), "1s") - ); - internalCluster().client().admin().cluster().updateSettings(req).get(); - testPeerRecovery(randomIntBetween(2, 5), true); - } - - public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataRefresh() throws Exception { - testPeerRecovery(1, false); - } - - public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogRefresh() throws Exception { - testPeerRecovery(randomIntBetween(2, 5), false); - } - - private void verifyRemoteStoreCleanup() throws Exception { - internalCluster().startNodes(3); - createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); - - indexData(5, randomBoolean(), INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID); - assertTrue(getFileCount(indexPath) > 0); - assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); - // Delete is async. 
Give time for it - assertBusy(() -> { - try { - assertThat(getFileCount(indexPath), comparesEqualTo(0)); - } catch (Exception e) {} - }, 30, TimeUnit.SECONDS); - } - - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9327") - public void testRemoteTranslogCleanup() throws Exception { - verifyRemoteStoreCleanup(); - } - - public void testStaleCommitDeletionWithInvokeFlush() throws Exception { - String dataNode = internalCluster().startNode(); - createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L, -1)); - int numberOfIterations = randomIntBetween(5, 15); - indexData(numberOfIterations, true, INDEX_NAME); - String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); - String shardPath = getShardLevelBlobPath( - client(), - INDEX_NAME, - BlobPath.cleanPath(), - "0", - SEGMENTS, - METADATA, - segmentsPathFixedPrefix - ).buildAsString(); - Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); - ; - IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); - int lastNMetadataFilesToKeep = indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles(); - // Delete is async. - assertBusy(() -> { - int actualFileCount = getFileCount(indexPath); - if (numberOfIterations <= lastNMetadataFilesToKeep) { - MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); - } else { - // As delete is async its possible that the file gets created before the deletion or after - // deletion. - if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { - // With pinned timestamp, we also keep md files since last successful fetch - assertTrue(actualFileCount >= lastNMetadataFilesToKeep); - } else { - MatcherAssert.assertThat( - actualFileCount, - is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) - ); - } - } - }, 30, TimeUnit.SECONDS); - } - - public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { - internalCluster().startNode(); - createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); - int numberOfIterations = randomIntBetween(5, 15); - indexData(numberOfIterations, false, INDEX_NAME); - String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); - String shardPath = getShardLevelBlobPath( - client(), - INDEX_NAME, - BlobPath.cleanPath(), - "0", - SEGMENTS, - METADATA, - segmentsPathFixedPrefix - ).buildAsString(); - Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); - int actualFileCount = getFileCount(indexPath); - // We also allow (numberOfIterations + 1) as index creation also triggers refresh. 
- MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); - } - - public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { - Settings.Builder settings = Settings.builder() - .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); - internalCluster().startNode(settings); - String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); - createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); - int numberOfIterations = randomIntBetween(5, 15); - indexData(numberOfIterations, true, INDEX_NAME); - String shardPath = getShardLevelBlobPath( - client(), - INDEX_NAME, - BlobPath.cleanPath(), - "0", - SEGMENTS, - METADATA, - segmentsPathFixedPrefix - ).buildAsString(); - Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); - int actualFileCount = getFileCount(indexPath); - // We also allow (numberOfIterations + 1) as index creation also triggers refresh. - if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { - // With pinned timestamp, we also keep md files since last successful fetch - assertTrue(actualFileCount >= 4); - } else { - assertEquals(4, actualFileCount); - } - } - - public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { - Settings.Builder settings = Settings.builder() - .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); - internalCluster().startNode(settings); - - createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); - int numberOfIterations = randomIntBetween(12, 18); - indexData(numberOfIterations, true, INDEX_NAME); - String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); - String shardPath = getShardLevelBlobPath( - client(), - INDEX_NAME, - BlobPath.cleanPath(), - "0", - SEGMENTS, - METADATA, - segmentsPathFixedPrefix - ).buildAsString(); - Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); - ; - int actualFileCount = getFileCount(indexPath); - // We also allow (numberOfIterations + 1) as index creation also triggers refresh. - MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations + 1))); - } - - /** - * Tests that when the index setting is not passed during index creation, the buffer interval picked up is the cluster - * default. 
- */ - public void testDefaultBufferInterval() throws ExecutionException, InterruptedException { - internalCluster().startClusterManagerOnlyNode(); - String clusterManagerName = internalCluster().getClusterManagerName(); - String dataNode = internalCluster().startDataOnlyNodes(1).get(0); - createIndex(INDEX_NAME); - ensureYellowAndNoInitializingShards(INDEX_NAME); - ensureGreen(INDEX_NAME); - assertClusterRemoteBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, dataNode); - - IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); - assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor); - assertBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indexShard); - - // Next, we change the default buffer interval and the same should reflect in the buffer interval of the index created - TimeValue clusterBufferInterval = TimeValue.timeValueSeconds(randomIntBetween(100, 200)); - client(clusterManagerName).admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), clusterBufferInterval)) - .get(); - assertBufferInterval(clusterBufferInterval, indexShard); - clearClusterBufferIntervalSetting(clusterManagerName); - } - - /** - * This tests multiple cases where the index setting is passed during the index creation with multiple combinations - * with and without cluster default. - */ - public void testOverriddenBufferInterval() throws ExecutionException, InterruptedException { - internalCluster().startClusterManagerOnlyNode(); - String clusterManagerName = internalCluster().getClusterManagerName(); - String dataNode = internalCluster().startDataOnlyNodes(1).get(0); - - TimeValue bufferInterval = TimeValue.timeValueSeconds(randomIntBetween(0, 100)); - Settings indexSettings = Settings.builder() - .put(indexSettings()) - .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) - .build(); - createIndex(INDEX_NAME, indexSettings); - ensureYellowAndNoInitializingShards(INDEX_NAME); - ensureGreen(INDEX_NAME); - - IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); - assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor); - assertBufferInterval(bufferInterval, indexShard); - - // Set the cluster default with a different value, validate that the buffer interval is still the overridden value - TimeValue clusterBufferInterval = TimeValue.timeValueSeconds(randomIntBetween(100, 200)); - client(clusterManagerName).admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), clusterBufferInterval)) - .get(); - assertBufferInterval(bufferInterval, indexShard); - - // Set the index setting (index.remote_store.translog.buffer_interval) with a different value and validate that - // the buffer interval is updated - bufferInterval = TimeValue.timeValueSeconds(bufferInterval.seconds() + randomIntBetween(1, 100)); - client(clusterManagerName).admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(INDEX_NAME).settings( - Settings.builder().put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) - ) - ) - .get(); - assertBufferInterval(bufferInterval, indexShard); - - // Set the index setting (index.remote_store.translog.buffer_interval) with null and validate the buffer interval - // which will be the cluster default now. 
- client(clusterManagerName).admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(INDEX_NAME).settings( - Settings.builder().putNull(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey()) - ) - ) - .get(); - assertBufferInterval(clusterBufferInterval, indexShard); - clearClusterBufferIntervalSetting(clusterManagerName); - } - - /** - * This tests validation which kicks in during index creation failing creation if the value is less than minimum allowed value. - */ - public void testOverriddenBufferIntervalValidation() { - internalCluster().startClusterManagerOnlyNode(); - TimeValue bufferInterval = TimeValue.timeValueSeconds(-1); - Settings indexSettings = Settings.builder() - .put(indexSettings()) - .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) - .build(); - IllegalArgumentException exceptionDuringCreateIndex = assertThrows( - IllegalArgumentException.class, - () -> createIndex(INDEX_NAME, indexSettings) - ); - assertEquals( - "failed to parse value [-1] for setting [index.remote_store.translog.buffer_interval], must be >= [0ms]", - exceptionDuringCreateIndex.getMessage() - ); - } - - /** - * This tests validation of the cluster setting when being set. - */ - public void testClusterBufferIntervalValidation() { - String clusterManagerName = internalCluster().startClusterManagerOnlyNode(); - IllegalArgumentException exception = assertThrows( - IllegalArgumentException.class, - () -> client(clusterManagerName).admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings( - Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(-1)) - ) - .get() - ); - assertEquals( - "failed to parse value [-1] for setting [cluster.remote_store.translog.buffer_interval], must be >= [0ms]", - exception.getMessage() - ); - } - - public void testRequestDurabilityWhenRestrictSettingExplicitFalse() throws ExecutionException, InterruptedException { - // Explicit node settings and request durability - testRestrictSettingFalse(true, Durability.REQUEST); - } - - public void testAsyncDurabilityWhenRestrictSettingExplicitFalse() throws ExecutionException, InterruptedException { - // Explicit node settings and async durability - testRestrictSettingFalse(true, Durability.ASYNC); - } - - public void testRequestDurabilityWhenRestrictSettingImplicitFalse() throws ExecutionException, InterruptedException { - // No node settings and request durability - testRestrictSettingFalse(false, Durability.REQUEST); - } - - public void testAsyncDurabilityWhenRestrictSettingImplicitFalse() throws ExecutionException, InterruptedException { - // No node settings and async durability - testRestrictSettingFalse(false, Durability.ASYNC); - } - - private void testRestrictSettingFalse(boolean setRestrictFalse, Durability durability) throws ExecutionException, InterruptedException { - String clusterManagerName; - if (setRestrictFalse) { - clusterManagerName = internalCluster().startClusterManagerOnlyNode( - Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), false).build() - ); - } else { - clusterManagerName = internalCluster().startClusterManagerOnlyNode(); - } - String dataNode = internalCluster().startDataOnlyNodes(1).get(0); - Settings indexSettings = Settings.builder() - .put(indexSettings()) - .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability) - .build(); - createIndex(INDEX_NAME, indexSettings); - IndexShard indexShard = 
getIndexShard(dataNode, INDEX_NAME); - assertEquals(durability, indexShard.indexSettings().getTranslogDurability()); - - durability = randomFrom(Durability.values()); - client(clusterManagerName).admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(INDEX_NAME).settings( - Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability) - ) - ) - .get(); - assertEquals(durability, indexShard.indexSettings().getTranslogDurability()); - } - - public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws ExecutionException, InterruptedException { - String expectedExceptionMsg = - "index setting [index.translog.durability=async] is not allowed as cluster setting [cluster.remote_store.index.restrict.async-durability=true]"; - String clusterManagerName = internalCluster().startClusterManagerOnlyNode( - Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build() - ); - String dataNode = internalCluster().startDataOnlyNodes(1).get(0); - - // Case 1 - Test create index fails - Settings indexSettings = Settings.builder() - .put(indexSettings()) - .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Durability.ASYNC) - .build(); - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); - assertEquals(expectedExceptionMsg, exception.getMessage()); - - // Case 2 - Test update index fails - createIndex(INDEX_NAME); - IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); - assertEquals(Durability.REQUEST, indexShard.indexSettings().getTranslogDurability()); - exception = assertThrows( - IllegalArgumentException.class, - () -> client(clusterManagerName).admin() - .indices() - .updateSettings(new UpdateSettingsRequest(INDEX_NAME).settings(indexSettings)) - .actionGet() - ); - assertEquals(expectedExceptionMsg, exception.getMessage()); - } - - private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) { - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); - assertEquals(expectedBufferInterval, indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval()); - } - - private void assertBufferInterval(TimeValue expectedBufferInterval, IndexShard indexShard) { - assertEquals( - expectedBufferInterval, - ((BufferedAsyncIOProcessor) indexShard.getTranslogSyncProcessor()).getBufferIntervalSupplier().get() - ); - } - - private void clearClusterBufferIntervalSetting(String clusterManagerName) { - client(clusterManagerName).admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().putNull(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey())) - .get(); - } - - public void testRestoreSnapshotToIndexWithSameNameDifferentUUID() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - List dataNodes = internalCluster().startDataOnlyNodes(2); - - Path absolutePath = randomRepoPath().toAbsolutePath(); - createRepository("test-repo", "fs", Settings.builder().put("location", absolutePath)); - - logger.info("--> Create index and ingest 50 docs"); - createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); - indexBulk(INDEX_NAME, 50); - flushAndRefresh(INDEX_NAME); - - String originalIndexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - assertNotNull(originalIndexUUID); - 
assertNotEquals(IndexMetadata.INDEX_UUID_NA_VALUE, originalIndexUUID); - - ensureGreen(); - - logger.info("--> take a snapshot"); - client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices(INDEX_NAME).setWaitForCompletion(true).get(); - - logger.info("--> wipe all indices"); - cluster().wipeIndices(INDEX_NAME); - - logger.info("--> Create index with the same name, different UUID"); - assertAcked( - prepareCreate(INDEX_NAME).setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1)) - ); - - ensureGreen(TimeValue.timeValueSeconds(30), INDEX_NAME); - - String newIndexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - assertNotNull(newIndexUUID); - assertNotEquals(IndexMetadata.INDEX_UUID_NA_VALUE, newIndexUUID); - assertNotEquals(newIndexUUID, originalIndexUUID); - - logger.info("--> close index"); - client().admin().indices().prepareClose(INDEX_NAME).get(); - - logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .execute() - .actionGet(); - assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - - flushAndRefresh(INDEX_NAME); - - ensureGreen(INDEX_NAME); - assertBusy(() -> { - assertHitCount(client(dataNodes.get(0)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); - assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); - }); - } - - public void testNoSearchIdleForAnyReplicaCount() throws ExecutionException, InterruptedException { - internalCluster().startClusterManagerOnlyNode(); - String primaryShardNode = internalCluster().startDataOnlyNodes(1).get(0); - - createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); - ensureGreen(INDEX_NAME); - IndexShard indexShard = getIndexShard(primaryShardNode, INDEX_NAME); - assertFalse(indexShard.isSearchIdleSupported()); - - String replicaShardNode = internalCluster().startDataOnlyNodes(1).get(0); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings(INDEX_NAME) - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) - ); - ensureGreen(INDEX_NAME); - assertFalse(indexShard.isSearchIdleSupported()); - - indexShard = getIndexShard(replicaShardNode, INDEX_NAME); - assertFalse(indexShard.isSearchIdleSupported()); - } - - public void testFallbackToNodeToNodeSegmentCopy() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - List dataNodes = internalCluster().startDataOnlyNodes(2); - - // 1. Create index with 0 replica - createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); - ensureGreen(INDEX_NAME); - - // 2. Index docs - indexBulk(INDEX_NAME, 50); - flushAndRefresh(INDEX_NAME); - - String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); - // 3. Delete data from remote segment store - String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, DATA, segmentsPathFixedPrefix) - .buildAsString(); - Path segmentDataPath = Path.of(segmentRepoPath + "/" + shardPath); - - try (Stream files = Files.list(segmentDataPath)) { - files.forEach(p -> { - try { - Files.delete(p); - } catch (IOException e) { - // Ignore - } - }); - } - - // 4. 
Start recovery by changing number of replicas to 1 - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings(INDEX_NAME) - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) - ); - - // 5. Ensure green and verify number of docs - ensureGreen(INDEX_NAME); - assertBusy(() -> { - assertHitCount(client(dataNodes.get(0)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); - assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); - }); - } - - public void testNoMultipleWriterDuringPrimaryRelocation() throws ExecutionException, InterruptedException { - // In this test, we trigger a force flush on existing primary while the primary mode on new primary has been - // activated. There was a bug in primary relocation of remote store enabled indexes where the new primary - // starts uploading translog and segments even before the cluster manager has started this shard. With this test, - // we check that we do not overwrite any file on remote store. Here we will also increase the replica count to - // check that there are no duplicate metadata files for translog or upload. - - internalCluster().startClusterManagerOnlyNode(); - String oldPrimary = internalCluster().startDataOnlyNodes(1).get(0); - createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); - ensureGreen(INDEX_NAME); - indexBulk(INDEX_NAME, randomIntBetween(5, 10)); - String newPrimary = internalCluster().startDataOnlyNodes(1).get(0); - ensureStableCluster(3); - - IndexShard oldPrimaryIndexShard = getIndexShard(oldPrimary, INDEX_NAME); - CountDownLatch flushLatch = new CountDownLatch(1); - - MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance( - TransportService.class, - oldPrimary - )); - mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> { - if (PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT.equals(action)) { - flushLatch.countDown(); - } - connection.sendRequest(requestId, action, request, options); - }); - - logger.info("--> relocate the shard"); - client().admin() - .cluster() - .prepareReroute() - .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) - .execute() - .actionGet(); - - CountDownLatch flushDone = new CountDownLatch(1); - Thread flushThread = new Thread(() -> { - try { - flushLatch.await(2, TimeUnit.SECONDS); - oldPrimaryIndexShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); - // newPrimaryTranslogRepo.setSleepSeconds(0); - } catch (IndexShardClosedException e) { - // this is fine - } catch (InterruptedException e) { - throw new AssertionError(e); - } finally { - flushDone.countDown(); - } - }); - flushThread.start(); - flushDone.await(5, TimeUnit.SECONDS); - flushThread.join(); - - ClusterHealthResponse clusterHealthResponse = client().admin() - .cluster() - .prepareHealth() - .setWaitForStatus(ClusterHealthStatus.GREEN) - .setWaitForEvents(Priority.LANGUID) - .setWaitForNoRelocatingShards(true) - .setTimeout(TimeValue.timeValueSeconds(5)) - .execute() - .actionGet(); - assertFalse(clusterHealthResponse.isTimedOut()); - - client().admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(INDEX_NAME).settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) - ) - .get(); - - clusterHealthResponse = client().admin() - .cluster() - .prepareHealth() - .setWaitForStatus(ClusterHealthStatus.GREEN) - .setWaitForEvents(Priority.LANGUID) - .setWaitForNoRelocatingShards(true) - 
.setTimeout(TimeValue.timeValueSeconds(5)) - .execute() - .actionGet(); - assertFalse(clusterHealthResponse.isTimedOut()); - } - - public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionException, InterruptedException, IOException { - // In this test, we fail the hand off during the primary relocation. This will undo the drainRefreshes and - // drainSync performed as part of relocation handoff (before performing the handoff transport action). - // We validate the same here by failing the peer recovery and ensuring we can index afterward as well. - - internalCluster().startClusterManagerOnlyNode(); - String oldPrimary = internalCluster().startDataOnlyNodes(1).get(0); - createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); - ensureGreen(INDEX_NAME); - int docs = randomIntBetween(5, 10); - indexBulk(INDEX_NAME, docs); - flushAndRefresh(INDEX_NAME); - assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs); - String newPrimary = internalCluster().startDataOnlyNodes(1).get(0); - ensureStableCluster(3); - - IndexShard oldPrimaryIndexShard = getIndexShard(oldPrimary, INDEX_NAME); - CountDownLatch handOffLatch = new CountDownLatch(1); - - MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance( - TransportService.class, - oldPrimary - )); - mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> { - if (PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT.equals(action)) { - handOffLatch.countDown(); - throw new OpenSearchException("failing recovery for test purposes"); - } - connection.sendRequest(requestId, action, request, options); - }); - - logger.info("--> relocate the shard"); - client().admin() - .cluster() - .prepareReroute() - .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) - .execute() - .actionGet(); - - handOffLatch.await(30, TimeUnit.SECONDS); - - assertTrue(oldPrimaryIndexShard.isStartedPrimary()); - assertEquals(oldPrimary, primaryNodeName(INDEX_NAME)); - assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs); - - SearchPhaseExecutionException ex = assertThrows( - SearchPhaseExecutionException.class, - () -> client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get() - ); - assertEquals("all shards failed", ex.getMessage()); - - int moreDocs = randomIntBetween(5, 10); - indexBulk(INDEX_NAME, moreDocs); - flushAndRefresh(INDEX_NAME); - int uncommittedOps = randomIntBetween(5, 10); - indexBulk(INDEX_NAME, uncommittedOps); - assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs + moreDocs); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); - - restore(true, INDEX_NAME); - ensureGreen(INDEX_NAME); - assertHitCount( - client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), - docs + moreDocs + uncommittedOps - ); - - String newNode = internalCluster().startDataOnlyNodes(1).get(0); - ensureStableCluster(3); - client().admin() - .cluster() - .prepareReroute() - .add(new MoveAllocationCommand(INDEX_NAME, 0, newPrimary, newNode)) - .execute() - .actionGet(); - - ClusterHealthResponse clusterHealthResponse = client().admin() - .cluster() - .prepareHealth() - .setWaitForStatus(ClusterHealthStatus.GREEN) - .setWaitForEvents(Priority.LANGUID) - .setWaitForNoRelocatingShards(true) 
- .setTimeout(TimeValue.timeValueSeconds(10)) - .execute() - .actionGet(); - assertFalse(clusterHealthResponse.isTimedOut()); - - ex = assertThrows( - SearchPhaseExecutionException.class, - () -> client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get() - ); - assertEquals("all shards failed", ex.getMessage()); - assertHitCount( - client(newNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), - docs + moreDocs + uncommittedOps - ); - } - - // Test local only translog files which are not uploaded to remote store (no metadata present in remote) - // Without the cleanup change in RemoteFsTranslog.createEmptyTranslog, this test fails with NPE. - public void testLocalOnlyTranslogCleanupOnNodeRestart() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - String dataNode = internalCluster().startDataOnlyNode(); - - // 1. Create index with 0 replica - createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); - ensureGreen(INDEX_NAME); - - // 2. Index docs - int searchableDocs = 0; - for (int i = 0; i < randomIntBetween(1, 5); i++) { - indexBulk(INDEX_NAME, 15); - refresh(INDEX_NAME); - searchableDocs += 15; - } - indexBulk(INDEX_NAME, 15); - - assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs); - - // 3. Delete metadata from remote translog - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - - String translogPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.get(getNodeSettings()); - String shardPath = getShardLevelBlobPath( - client(), - INDEX_NAME, - BlobPath.cleanPath(), - "0", - TRANSLOG, - METADATA, - translogPathFixedPrefix - ).buildAsString(); - Path translogMetaDataPath = Path.of(translogRepoPath + "/" + shardPath); - - try (Stream files = Files.list(translogMetaDataPath)) { - files.forEach(p -> { - try { - Files.delete(p); - } catch (IOException e) { - // Ignore - } - }); - } - - internalCluster().restartNode(dataNode); - - ensureGreen(INDEX_NAME); - - assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs); - indexBulk(INDEX_NAME, 15); - refresh(INDEX_NAME); - assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs + 15); - } - - public void testFlushOnTooManyRemoteTranslogFiles() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - String datanode = internalCluster().startDataOnlyNodes(1).get(0); - createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); - ensureGreen(INDEX_NAME); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings( - Settings.builder() - .put(RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS.getKey(), "100") - .put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "0ms") - ); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - - IndexShard indexShard = getIndexShard(datanode, INDEX_NAME); - Path translogLocation = getTranslog(indexShard).location(); - assertFalse(indexShard.shouldPeriodicallyFlush()); - - try (Stream files = Files.list(translogLocation)) { - long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count(); - assertEquals(totalFiles, 1L); - } - - // indexing 100 documents (100 bulk requests), no flush will be 
triggered yet - for (int i = 0; i < 100; i++) { - indexBulk(INDEX_NAME, 1); - } - - try (Stream files = Files.list(translogLocation)) { - long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count(); - assertEquals(totalFiles, 101L); - } - // Will flush and trim the translog readers - indexBulk(INDEX_NAME, 1); - - assertBusy(() -> { - try (Stream files = Files.list(translogLocation)) { - long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count(); - assertEquals(totalFiles, 1L); - } - }, 30, TimeUnit.SECONDS); - - // Disabling max translog readers - assertAcked( - internalCluster().client() - .admin() - .cluster() - .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS.getKey(), "-1")) - .get() - ); - - // Indexing 500 more docs - for (int i = 0; i < 500; i++) { - indexBulk(INDEX_NAME, 1); - } - - // No flush is triggered since max_translog_readers is set to -1 - // Total tlog files would be incremented by 500 - try (Stream files = Files.list(translogLocation)) { - long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count(); - assertEquals(totalFiles, 501L); - } - } - - public void testAsyncTranslogDurabilityRestrictionsThroughIdxTemplates() throws Exception { - logger.info("Starting up cluster manager with cluster.remote_store.index.restrict.async-durability set to true"); - String cm1 = internalCluster().startClusterManagerOnlyNode( - Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build() - ); - internalCluster().startDataOnlyNode(); - ensureStableCluster(2); - assertThrows( - IllegalArgumentException.class, - () -> internalCluster().client() - .admin() - .indices() - .preparePutTemplate("test") - .setPatterns(Arrays.asList("test*")) - .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), "async")) - .get() - ); - logger.info("Starting up another cluster manager with cluster.remote_store.index.restrict.async-durability set to false"); - internalCluster().startClusterManagerOnlyNode( - Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), false).build() - ); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(cm1)); - ensureStableCluster(2); - assertAcked( - internalCluster().client() - .admin() - .indices() - .preparePutTemplate("test") - .setPatterns(Arrays.asList("test*")) - .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), "async")) - .get() - ); - } - - public void testCloseIndexWithNoOpSyncAndFlushForSyncTranslog() throws InterruptedException { - internalCluster().startNodes(3); - client().admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "5s")) - .get(); - Settings.Builder settings = Settings.builder() - .put(remoteStoreIndexSettings(0, 10000L, -1)) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s"); - createIndex(INDEX_NAME, settings.build()); - CountDownLatch latch = new CountDownLatch(1); - new Thread(() -> { - if (randomBoolean()) { - for (int i = 0; i < randomIntBetween(1, 5); i++) { - indexSingleDoc(INDEX_NAME); - } - flushAndRefresh(INDEX_NAME); - } - // Index single doc to start the asyn io processor to run which will lead to 
10s wait time before the next sync. - indexSingleDoc(INDEX_NAME); - // Reduce the latch for the main thread to flush after some sleep. - latch.countDown(); - // Index another doc and in this case the flush would have happened before the sync. - indexSingleDoc(INDEX_NAME); - }).start(); - // Wait for atleast one doc to be ingested. - latch.await(); - // Sleep for some time for the next doc to be present in lucene buffer. If flush happens first before the doc #2 - // gets indexed, then it goes into the happy case where the close index happens succefully. - Thread.sleep(1000); - // Flush so that the subsequent sync or flushes are no-op. - flush(INDEX_NAME); - // Closing the index involves translog.sync and shard.flush which are now no-op. - client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); - Thread.sleep(10000); - ensureGreen(INDEX_NAME); - } - - public void testCloseIndexWithNoOpSyncAndFlushForAsyncTranslog() throws InterruptedException { - internalCluster().startNodes(3); - Settings.Builder settings = Settings.builder() - .put(remoteStoreIndexSettings(0, 10000L, -1)) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") - .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Durability.ASYNC) - .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "10s"); - createIndex(INDEX_NAME, settings.build()); - CountDownLatch latch = new CountDownLatch(1); - new Thread(() -> { - // Index some docs to start the asyn io processor to run which will lead to 10s wait time before the next sync. - indexSingleDoc(INDEX_NAME); - indexSingleDoc(INDEX_NAME); - indexSingleDoc(INDEX_NAME); - // Reduce the latch for the main thread to flush after some sleep. - latch.countDown(); - }).start(); - // Wait for atleast one doc to be ingested. - latch.await(); - // Flush so that the subsequent sync or flushes are no-op. - flush(INDEX_NAME); - // Closing the index involves translog.sync and shard.flush which are now no-op. 
- client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); - Thread.sleep(10000); - ensureGreen(INDEX_NAME); - } - - public void testSuccessfulShallowV1SnapshotPostIndexClose() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - String dataNode = internalCluster().startDataOnlyNodes(1).get(0); - createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); - ensureGreen(INDEX_NAME); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "0ms")); - - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - - logger.info("Create shallow snapshot setting enabled repo"); - String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; - Path shallowSnapshotRepoPath = randomRepoPath(); - Settings.Builder settings = Settings.builder() - .put("location", shallowSnapshotRepoPath) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE); - createRepository(shallowSnapshotRepoName, "fs", settings); - - for (int i = 0; i < 10; i++) { - indexBulk(INDEX_NAME, 1); - } - flushAndRefresh(INDEX_NAME); - - logger.info("Verify shallow snapshot created before close"); - final String snapshot1 = "snapshot1"; - SnapshotInfo snapshotInfo1 = internalCluster().client() - .admin() - .cluster() - .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot1) - .setIndices(INDEX_NAME) - .setWaitForCompletion(true) - .get() - .getSnapshotInfo(); - - assertEquals(SnapshotState.SUCCESS, snapshotInfo1.state()); - assertTrue(snapshotInfo1.successfulShards() > 0); - assertEquals(0, snapshotInfo1.failedShards()); - - for (int i = 0; i < 10; i++) { - indexBulk(INDEX_NAME, 1); - } - - // close index - client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); - Thread.sleep(1000); - logger.info("Verify shallow snapshot created after close"); - final String snapshot2 = "snapshot2"; - - SnapshotInfo snapshotInfo2 = internalCluster().client() - .admin() - .cluster() - .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot2) - .setIndices(INDEX_NAME) - .setWaitForCompletion(true) - .get() - .getSnapshotInfo(); - - assertEquals(SnapshotState.SUCCESS, snapshotInfo2.state()); - assertTrue(snapshotInfo2.successfulShards() > 0); - assertEquals(0, snapshotInfo2.failedShards()); - - // delete the index - cluster().wipeIndices(INDEX_NAME); - // try restoring the snapshot - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(shallowSnapshotRepoName, snapshot2) - .setWaitForCompletion(true) - .execute() - .actionGet(); - assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - ensureGreen(INDEX_NAME); - flushAndRefresh(INDEX_NAME); - assertBusy(() -> { assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), 20); }); - } -} +/** + * Run all tests in RemoteStoreIT with local FS. 
+ */ +public class RemoteStoreIT extends RemoteStoreCoreTestCase {} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartCoreTestCase.java similarity index 97% rename from server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java rename to server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartCoreTestCase.java index 0ba58942644e6..63532fadfb6fc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartCoreTestCase.java @@ -14,7 +14,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.plugins.Plugin; -import org.opensearch.remotestore.RemoteStoreIT; +import org.opensearch.remotestore.RemoteStoreCoreTestCase; import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -34,7 +34,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class RemoteStoreMultipartIT extends RemoteStoreIT { +public class RemoteStoreMultipartCoreTestCase extends RemoteStoreCoreTestCase { Path repositoryLocation; boolean compress; diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java index 427dbb690448f..9addc3e1e64cd 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java @@ -146,7 +146,7 @@ protected void trimUnreferencedReaders(boolean indexDeleted, boolean trimLocal) // This is to fail fast and avoid listing md files un-necessarily. if (indexDeleted == false && RemoteStoreUtils.isPinnedTimestampStateStale()) { - logger.warn("Skipping remote translog garbage collection as last fetch of pinned timestamp is stale"); + logger.debug("Skipping remote translog garbage collection as last fetch of pinned timestamp is stale"); return; } @@ -179,7 +179,7 @@ public void onResponse(List blobMetadata) { // Check last fetch status of pinned timestamps. If stale, return. 
if (indexDeleted == false && RemoteStoreUtils.isPinnedTimestampStateStale()) { - logger.warn("Skipping remote translog garbage collection as last fetch of pinned timestamp is stale"); + logger.debug("Skipping remote translog garbage collection as last fetch of pinned timestamp is stale"); remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); return; } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/test/framework/src/main/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java similarity index 93% rename from server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java rename to test/framework/src/main/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index bcb0d54c0a25c..e8abcbb5f4fee 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -37,8 +37,8 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; +import org.opensearch.remotestore.mocks.MockFsMetadataSupportedRepositoryPlugin; import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; -import org.opensearch.remotestore.translogmetadata.mocks.MockFsMetadataSupportedRepositoryPlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.fs.ReloadableFsRepository; @@ -69,6 +69,7 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; + protected static final String REPOSITORY_3_NAME = "test-remote-store-repo-3"; protected static final String REMOTE_ROUTING_TABLE_REPO = "remote-routing-table-repo"; protected static final int SHARD_COUNT = 1; protected static int REPLICA_COUNT = 1; @@ -159,28 +160,26 @@ protected Settings nodeSettings(int nodeOrdinal) { if (clusterSettingsSuppliedByTest) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); } else { - if (asyncUploadMockFsRepo) { - String repoType = metadataSupportedType ? MockFsMetadataSupportedRepositoryPlugin.TYPE_MD : MockFsRepositoryPlugin.TYPE; - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put( - remoteStoreClusterSettings( - REPOSITORY_NAME, - segmentRepoPath, - repoType, - REPOSITORY_2_NAME, - translogRepoPath, - repoType - ) - ) - .build(); - } else { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) - .build(); - } + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreRepoSettings()).build(); + } + } + + protected Settings remoteStoreRepoSettings() { + Settings remoteStoreRepoSettings; + if (asyncUploadMockFsRepo) { + String repoType = metadataSupportedType ? 
MockFsMetadataSupportedRepositoryPlugin.TYPE_MD : MockFsRepositoryPlugin.TYPE; + remoteStoreRepoSettings = remoteStoreClusterSettings( + REPOSITORY_NAME, + segmentRepoPath, + repoType, + REPOSITORY_2_NAME, + translogRepoPath, + repoType + ); + } else { + remoteStoreRepoSettings = remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath); } + return remoteStoreRepoSettings; } protected void setFailRate(String repoName, int value) throws ExecutionException, InterruptedException { diff --git a/test/framework/src/main/java/org/opensearch/remotestore/RemoteStoreCoreTestCase.java b/test/framework/src/main/java/org/opensearch/remotestore/RemoteStoreCoreTestCase.java new file mode 100644 index 0000000000000..d34db204a112f --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/remotestore/RemoteStoreCoreTestCase.java @@ -0,0 +1,1172 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.OpenSearchException; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.index.translog.Translog; +import org.opensearch.index.translog.Translog.Durability; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; +import org.opensearch.transport.client.Requests; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import 
java.util.Map; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.shard.IndexShardTestCase.getTranslog; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.oneOf; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreCoreTestCase extends RemoteStoreBaseIntegTestCase { + + protected final String INDEX_NAME = "remote-store-test-idx-1"; + + @Override + protected Collection> nodePlugins() { + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); + } + + @Override + public Settings indexSettings() { + return remoteStoreIndexSettings(0); + } + + private void testPeerRecovery(int numberOfIterations, boolean invokeFlush) throws Exception { + internalCluster().startNodes(3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + Map indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME); + + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + .get(); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + refresh(INDEX_NAME); + String replicaNodeName = replicaNodeName(INDEX_NAME); + assertBusy( + () -> assertHitCount(client(replicaNodeName).prepareSearch(INDEX_NAME).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS)), + 30, + TimeUnit.SECONDS + ); + + RecoveryResponse recoveryResponse = client(replicaNodeName).admin().indices().prepareRecoveries().get(); + + Optional recoverySource = recoveryResponse.shardRecoveryStates() + .get(INDEX_NAME) + .stream() + .filter(rs -> rs.getRecoverySource().getType() == RecoverySource.Type.PEER) + .findFirst(); + assertFalse(recoverySource.isEmpty()); + // segments_N file is copied to new replica + assertEquals(1, recoverySource.get().getIndex().recoveredFileCount()); + + IndexResponse response = indexSingleDoc(INDEX_NAME); + assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL) + 1, response.getSeqNo()); + refresh(INDEX_NAME); + assertBusy( + () -> assertHitCount(client(replicaNodeName).prepareSearch(INDEX_NAME).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS) + 1), + 30, + TimeUnit.SECONDS + 
); + } + + public void testRemoteStoreIndexCreationAndDeletionWithReferencedStore() throws InterruptedException, ExecutionException { + String dataNode = internalCluster().startNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); + + // Simulating a condition where store is already in use by increasing ref count, this helps in testing index + // deletion when refresh is in-progress. + indexShard.store().incRef(); + assertAcked(client().admin().indices().prepareDelete(INDEX_NAME)); + indexShard.store().decRef(); + } + + public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataFlush() throws Exception { + testPeerRecovery(1, true); + } + + public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogFlush() throws Exception { + testPeerRecovery(randomIntBetween(1, 2), true); + } + + public void testPeerRecoveryWithLowActivityTimeout() throws Exception { + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( + Settings.builder() + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20kb") + .put(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), "1s") + ); + internalCluster().client().admin().cluster().updateSettings(req).get(); + testPeerRecovery(randomIntBetween(1, 3), true); + } + + public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataRefresh() throws Exception { + testPeerRecovery(1, false); + } + + public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogRefresh() throws Exception { + testPeerRecovery(randomIntBetween(1, 3), false); + } + + private void verifyRemoteStoreCleanup() throws Exception { + internalCluster().startNodes(3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); + + indexData(5, randomBoolean(), INDEX_NAME); + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID); + assertTrue(getFileCount(indexPath) > 0); + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + // Delete is async. 
Give it time to complete. + assertBusy(() -> { + try { + assertThat(getFileCount(indexPath), comparesEqualTo(0)); + } catch (Exception e) {} + }, 30, TimeUnit.SECONDS); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9327") + public void testRemoteTranslogCleanup() throws Exception { + verifyRemoteStoreCleanup(); + } + + public void testStaleCommitDeletionWithInvokeFlush() throws Exception { + String clusterManagerName = internalCluster().startClusterManagerOnlyNode(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), randomIntBetween(2, 4)) + ) + .get(); + String dataNode = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L, -1)); + int numberOfIterations = randomIntBetween(1, 5); + indexData(numberOfIterations, true, INDEX_NAME); + String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); + String shardPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + getSegmentBasePath(), + "0", + SEGMENTS, + METADATA, + segmentsPathFixedPrefix + ).buildAsString(); + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); + int lastNMetadataFilesToKeep = indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles(); + // Delete is async. + assertBusy(() -> { + int actualFileCount = getActualFileCount(segmentRepoPath, shardPath); + if (numberOfIterations <= lastNMetadataFilesToKeep) { + MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); + } else { + // As delete is async, it's possible that the file gets created before the deletion or after + // deletion. + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + // With pinned timestamp, we also keep md files since last successful fetch + assertTrue(actualFileCount >= lastNMetadataFilesToKeep); + } else { + MatcherAssert.assertThat( + actualFileCount, + is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) + ); + } + } + }, 30, TimeUnit.SECONDS); + } + + public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { + String clusterManagerName = internalCluster().startClusterManagerOnlyNode(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), randomIntBetween(2, 4)) + ) + .get(); + internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L, -1)); + int numberOfIterations = randomIntBetween(1, 5); + indexData(numberOfIterations, false, INDEX_NAME); + String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); + String shardPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + getSegmentBasePath(), + "0", + SEGMENTS, + METADATA, + segmentsPathFixedPrefix + ).buildAsString(); + int actualFileCount = getActualFileCount(segmentRepoPath, shardPath); + // We also allow (numberOfIterations + 1) as index creation also triggers refresh.
+ MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); + } + + public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { + Settings.Builder settings = Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); + internalCluster().startNode(settings); + String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); + int numberOfIterations = randomIntBetween(5, 15); + indexData(numberOfIterations, true, INDEX_NAME); + String shardPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + getSegmentBasePath(), + "0", + SEGMENTS, + METADATA, + segmentsPathFixedPrefix + ).buildAsString(); + int actualFileCount = getActualFileCount(segmentRepoPath, shardPath); + // We also allow (numberOfIterations + 1) as index creation also triggers refresh. + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + // With pinned timestamp, we also keep md files since last successful fetch + assertTrue(actualFileCount >= 4); + } else { + assertEquals(4, actualFileCount); + } + } + + protected BlobPath getSegmentBasePath() { + return BlobPath.cleanPath(); + } + + public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { + Settings.Builder settings = Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); + internalCluster().startNode(settings); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); + int numberOfIterations = randomIntBetween(2, 5); + indexData(numberOfIterations, true, INDEX_NAME); + String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); + String shardPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + getSegmentBasePath(), + "0", + SEGMENTS, + METADATA, + segmentsPathFixedPrefix + ).buildAsString(); + int actualFileCount = getActualFileCount(segmentRepoPath, shardPath); + // We also allow (numberOfIterations + 1) as index creation also triggers refresh. + MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations, numberOfIterations + 1))); + } + + protected int getActualFileCount(Path segmentRepoPath, String shardPath) throws IOException { + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); + return getFileCount(indexPath); + } + + /** + * Tests that when the index setting is not passed during index creation, the buffer interval picked up is the cluster + * default. 
+ */ + public void testDefaultBufferInterval() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String clusterManagerName = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + assertClusterRemoteBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, dataNode); + + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); + assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor); + assertBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indexShard); + + // Next, we change the default buffer interval and the same should reflect in the buffer interval of the index created + TimeValue clusterBufferInterval = TimeValue.timeValueSeconds(randomIntBetween(100, 200)); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), clusterBufferInterval)) + .get(); + assertBufferInterval(clusterBufferInterval, indexShard); + clearClusterBufferIntervalSetting(clusterManagerName); + } + + /** + * This tests multiple cases where the index setting is passed during the index creation with multiple combinations + * with and without cluster default. + */ + public void testOverriddenBufferInterval() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String clusterManagerName = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + + TimeValue bufferInterval = TimeValue.timeValueSeconds(randomIntBetween(0, 100)); + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); + assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor); + assertBufferInterval(bufferInterval, indexShard); + + // Set the cluster default with a different value, validate that the buffer interval is still the overridden value + TimeValue clusterBufferInterval = TimeValue.timeValueSeconds(randomIntBetween(100, 200)); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), clusterBufferInterval)) + .get(); + assertBufferInterval(bufferInterval, indexShard); + + // Set the index setting (index.remote_store.translog.buffer_interval) with a different value and validate that + // the buffer interval is updated + bufferInterval = TimeValue.timeValueSeconds(bufferInterval.seconds() + randomIntBetween(1, 100)); + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) + ) + ) + .get(); + assertBufferInterval(bufferInterval, indexShard); + + // Set the index setting (index.remote_store.translog.buffer_interval) with null and validate the buffer interval + // which will be the cluster default now. 
+ client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().putNull(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey()) + ) + ) + .get(); + assertBufferInterval(clusterBufferInterval, indexShard); + clearClusterBufferIntervalSetting(clusterManagerName); + } + + /** + * This tests validation which kicks in during index creation failing creation if the value is less than minimum allowed value. + */ + public void testOverriddenBufferIntervalValidation() { + internalCluster().startClusterManagerOnlyNode(); + TimeValue bufferInterval = TimeValue.timeValueSeconds(-1); + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) + .build(); + IllegalArgumentException exceptionDuringCreateIndex = assertThrows( + IllegalArgumentException.class, + () -> createIndex(INDEX_NAME, indexSettings) + ); + assertEquals( + "failed to parse value [-1] for setting [index.remote_store.translog.buffer_interval], must be >= [0ms]", + exceptionDuringCreateIndex.getMessage() + ); + } + + /** + * This tests validation of the cluster setting when being set. + */ + public void testClusterBufferIntervalValidation() { + String clusterManagerName = internalCluster().startClusterManagerOnlyNode(); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(-1)) + ) + .get() + ); + assertEquals( + "failed to parse value [-1] for setting [cluster.remote_store.translog.buffer_interval], must be >= [0ms]", + exception.getMessage() + ); + } + + public void testRequestDurabilityWhenRestrictSettingExplicitFalse() throws ExecutionException, InterruptedException { + // Explicit node settings and request durability + testRestrictSettingFalse(true, Durability.REQUEST); + } + + public void testAsyncDurabilityWhenRestrictSettingExplicitFalse() throws ExecutionException, InterruptedException { + // Explicit node settings and async durability + testRestrictSettingFalse(true, Durability.ASYNC); + } + + public void testRequestDurabilityWhenRestrictSettingImplicitFalse() throws ExecutionException, InterruptedException { + // No node settings and request durability + testRestrictSettingFalse(false, Durability.REQUEST); + } + + public void testAsyncDurabilityWhenRestrictSettingImplicitFalse() throws ExecutionException, InterruptedException { + // No node settings and async durability + testRestrictSettingFalse(false, Durability.ASYNC); + } + + private void testRestrictSettingFalse(boolean setRestrictFalse, Durability durability) throws ExecutionException, InterruptedException { + String clusterManagerName; + if (setRestrictFalse) { + clusterManagerName = internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), false).build() + ); + } else { + clusterManagerName = internalCluster().startClusterManagerOnlyNode(); + } + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability) + .build(); + createIndex(INDEX_NAME, indexSettings); + IndexShard indexShard = 
getIndexShard(dataNode, INDEX_NAME); + assertEquals(durability, indexShard.indexSettings().getTranslogDurability()); + + durability = randomFrom(Durability.values()); + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability) + ) + ) + .get(); + assertEquals(durability, indexShard.indexSettings().getTranslogDurability()); + } + + public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws ExecutionException, InterruptedException { + String expectedExceptionMsg = + "index setting [index.translog.durability=async] is not allowed as cluster setting [cluster.remote_store.index.restrict.async-durability=true]"; + String clusterManagerName = internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build() + ); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + + // Case 1 - Test create index fails + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Durability.ASYNC) + .build(); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); + assertEquals(expectedExceptionMsg, exception.getMessage()); + + // Case 2 - Test update index fails + createIndex(INDEX_NAME); + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); + assertEquals(Durability.REQUEST, indexShard.indexSettings().getTranslogDurability()); + exception = assertThrows( + IllegalArgumentException.class, + () -> client(clusterManagerName).admin() + .indices() + .updateSettings(new UpdateSettingsRequest(INDEX_NAME).settings(indexSettings)) + .actionGet() + ); + assertEquals(expectedExceptionMsg, exception.getMessage()); + } + + private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) { + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); + assertEquals(expectedBufferInterval, indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval()); + } + + private void assertBufferInterval(TimeValue expectedBufferInterval, IndexShard indexShard) { + assertEquals( + expectedBufferInterval, + ((BufferedAsyncIOProcessor) indexShard.getTranslogSyncProcessor()).getBufferIntervalSupplier().get() + ); + } + + private void clearClusterBufferIntervalSetting(String clusterManagerName) { + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey())) + .get(); + } + + public void testRestoreSnapshotToIndexWithSameNameDifferentUUID() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + List dataNodes = internalCluster().startDataOnlyNodes(2); + + Path absolutePath = randomRepoPath().toAbsolutePath(); + createRepository("test-repo", "fs", Settings.builder().put("location", absolutePath)); + + logger.info("--> Create index and ingest 50 docs"); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); + indexBulk(INDEX_NAME, 50); + flushAndRefresh(INDEX_NAME); + + String originalIndexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + assertNotNull(originalIndexUUID); + 
assertNotEquals(IndexMetadata.INDEX_UUID_NA_VALUE, originalIndexUUID); + + ensureGreen(); + + logger.info("--> take a snapshot"); + client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices(INDEX_NAME).setWaitForCompletion(true).get(); + + logger.info("--> wipe all indices"); + cluster().wipeIndices(INDEX_NAME); + + logger.info("--> Create index with the same name, different UUID"); + assertAcked( + prepareCreate(INDEX_NAME).setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + ensureGreen(TimeValue.timeValueSeconds(30), INDEX_NAME); + + String newIndexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + assertNotNull(newIndexUUID); + assertNotEquals(IndexMetadata.INDEX_UUID_NA_VALUE, newIndexUUID); + assertNotEquals(newIndexUUID, originalIndexUUID); + + logger.info("--> close index"); + client().admin().indices().prepareClose(INDEX_NAME).get(); + + logger.info("--> restore all indices from the snapshot"); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + + flushAndRefresh(INDEX_NAME); + + ensureGreen(INDEX_NAME); + assertBusy(() -> { + assertHitCount(client(dataNodes.get(0)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + }); + } + + public void testNoSearchIdleForAnyReplicaCount() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String primaryShardNode = internalCluster().startDataOnlyNodes(1).get(0); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + IndexShard indexShard = getIndexShard(primaryShardNode, INDEX_NAME); + assertFalse(indexShard.isSearchIdleSupported()); + + String replicaShardNode = internalCluster().startDataOnlyNodes(1).get(0); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + ensureGreen(INDEX_NAME); + assertFalse(indexShard.isSearchIdleSupported()); + + indexShard = getIndexShard(replicaShardNode, INDEX_NAME); + assertFalse(indexShard.isSearchIdleSupported()); + } + + public void testFallbackToNodeToNodeSegmentCopy() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + List dataNodes = internalCluster().startDataOnlyNodes(2); + + // 1. Create index with 0 replica + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + // 2. Index docs + indexBulk(INDEX_NAME, 50); + flushAndRefresh(INDEX_NAME); + + String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); + // 3. Delete data from remote segment store + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, getSegmentBasePath(), "0", SEGMENTS, DATA, segmentsPathFixedPrefix) + .buildAsString(); + delete(segmentRepoPath, shardPath); + + // 4. Start recovery by changing number of replicas to 1 + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + // 5. 
Ensure green and verify number of docs + ensureGreen(INDEX_NAME); + assertBusy(() -> { + assertHitCount(client(dataNodes.get(0)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + }); + } + + protected void delete(Path baseRepoPath, String shardPath) throws IOException { + Path segmentDataPath = Path.of(baseRepoPath + "/" + shardPath); + try (Stream files = Files.list(segmentDataPath)) { + files.forEach(p -> { + try { + Files.delete(p); + } catch (IOException e) { + // Ignore + } + }); + } + } + + public void testNoMultipleWriterDuringPrimaryRelocation() throws ExecutionException, InterruptedException { + // In this test, we trigger a force flush on existing primary while the primary mode on new primary has been + // activated. There was a bug in primary relocation of remote store enabled indexes where the new primary + // starts uploading translog and segments even before the cluster manager has started this shard. With this test, + // we check that we do not overwrite any file on remote store. Here we will also increase the replica count to + // check that there are no duplicate metadata files for translog or upload. + + internalCluster().startClusterManagerOnlyNode(); + String oldPrimary = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + indexBulk(INDEX_NAME, randomIntBetween(5, 10)); + String newPrimary = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + + IndexShard oldPrimaryIndexShard = getIndexShard(oldPrimary, INDEX_NAME); + CountDownLatch flushLatch = new CountDownLatch(1); + + MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + oldPrimary + )); + mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT.equals(action)) { + flushLatch.countDown(); + } + connection.sendRequest(requestId, action, request, options); + }); + + logger.info("--> relocate the shard"); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) + .execute() + .actionGet(); + + CountDownLatch flushDone = new CountDownLatch(1); + Thread flushThread = new Thread(() -> { + try { + flushLatch.await(2, TimeUnit.SECONDS); + oldPrimaryIndexShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); + // newPrimaryTranslogRepo.setSleepSeconds(0); + } catch (IndexShardClosedException e) { + // this is fine + } catch (InterruptedException e) { + throw new AssertionError(e); + } finally { + flushDone.countDown(); + } + }); + flushThread.start(); + flushDone.await(5, TimeUnit.SECONDS); + flushThread.join(); + + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForStatus(ClusterHealthStatus.GREEN) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(TimeValue.timeValueSeconds(5)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + + client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ) + .get(); + + clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForStatus(ClusterHealthStatus.GREEN) + 
.setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(TimeValue.timeValueSeconds(5)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + } + + public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionException, InterruptedException, IOException { + // In this test, we fail the hand off during the primary relocation. This will undo the drainRefreshes and + // drainSync performed as part of relocation handoff (before performing the handoff transport action). + // We validate the same here by failing the peer recovery and ensuring we can index afterward as well. + + internalCluster().startClusterManagerOnlyNode(); + String oldPrimary = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + int docs = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, docs); + flushAndRefresh(INDEX_NAME); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs); + String newPrimary = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + + IndexShard oldPrimaryIndexShard = getIndexShard(oldPrimary, INDEX_NAME); + CountDownLatch handOffLatch = new CountDownLatch(1); + + MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + oldPrimary + )); + mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT.equals(action)) { + handOffLatch.countDown(); + throw new OpenSearchException("failing recovery for test purposes"); + } + connection.sendRequest(requestId, action, request, options); + }); + + logger.info("--> relocate the shard"); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) + .execute() + .actionGet(); + + handOffLatch.await(30, TimeUnit.SECONDS); + + assertTrue(oldPrimaryIndexShard.isStartedPrimary()); + assertEquals(oldPrimary, primaryNodeName(INDEX_NAME)); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs); + + SearchPhaseExecutionException ex = assertThrows( + SearchPhaseExecutionException.class, + () -> client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get() + ); + assertEquals("all shards failed", ex.getMessage()); + + int moreDocs = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, moreDocs); + flushAndRefresh(INDEX_NAME); + int uncommittedOps = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, uncommittedOps); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs + moreDocs); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + + restore(true, INDEX_NAME); + ensureGreen(INDEX_NAME); + assertHitCount( + client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), + docs + moreDocs + uncommittedOps + ); + + String newNode = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, newPrimary, newNode)) + .execute() + .actionGet(); + + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForStatus(ClusterHealthStatus.GREEN) 
+ .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(TimeValue.timeValueSeconds(10)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + + ex = assertThrows( + SearchPhaseExecutionException.class, + () -> client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get() + ); + assertEquals("all shards failed", ex.getMessage()); + assertHitCount( + client(newNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), + docs + moreDocs + uncommittedOps + ); + } + + // Test local only translog files which are not uploaded to remote store (no metadata present in remote) + // Without the cleanup change in RemoteFsTranslog.createEmptyTranslog, this test fails with NPE. + public void testLocalOnlyTranslogCleanupOnNodeRestart() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + String dataNode = internalCluster().startDataOnlyNode(); + + // 1. Create index with 0 replica + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + // 2. Index docs + int searchableDocs = 0; + for (int i = 0; i < randomIntBetween(1, 3); i++) { + indexBulk(INDEX_NAME, 15); + refresh(INDEX_NAME); + searchableDocs += 15; + } + indexBulk(INDEX_NAME, 15); + + assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs); + + // 3. Delete metadata from remote translog + String translogPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.get(getNodeSettings()); + String shardPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + getSegmentBasePath(), + "0", + TRANSLOG, + METADATA, + translogPathFixedPrefix + ).buildAsString(); + delete(translogRepoPath, shardPath); + + internalCluster().restartNode(dataNode); + ensureGreen(INDEX_NAME); + + // For remote store, it is possible that the refreshes gets triggered and the refreshed segments for last 15 docs are uploaded + MatcherAssert.assertThat( + (int) client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get().getHits().getTotalHits().value(), + is(oneOf(searchableDocs, searchableDocs + 15)) + ); + indexBulk(INDEX_NAME, 15); + refresh(INDEX_NAME); + MatcherAssert.assertThat( + (int) client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get().getHits().getTotalHits().value(), + is(oneOf(searchableDocs + 15, searchableDocs + 30)) + ); + } + + public void testFlushOnTooManyRemoteTranslogFiles() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + String datanode = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS.getKey(), "100") + .put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "0ms") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + IndexShard indexShard = getIndexShard(datanode, INDEX_NAME); + Path translogLocation = getTranslog(indexShard).location(); + assertFalse(indexShard.shouldPeriodicallyFlush()); + + try (Stream files = Files.list(translogLocation)) { + long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count(); + assertEquals(totalFiles, 1L); + } + + // indexing 100 documents (100 bulk requests), 
no flush will be triggered yet + for (int i = 0; i < 100; i++) { + indexBulk(INDEX_NAME, 1); + } + + try (Stream files = Files.list(translogLocation)) { + long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count(); + assertEquals(totalFiles, 101L); + } + // Will flush and trim the translog readers + indexBulk(INDEX_NAME, 1); + + assertBusy(() -> { + try (Stream files = Files.list(translogLocation)) { + long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count(); + assertEquals(totalFiles, 1L); + } + }, 30, TimeUnit.SECONDS); + + // Disabling max translog readers + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS.getKey(), "-1")) + .get() + ); + + // Indexing 500 more docs + for (int i = 0; i < 500; i++) { + indexBulk(INDEX_NAME, 1); + } + + // No flush is triggered since max_translog_readers is set to -1 + // Total tlog files would be incremented by 500 + try (Stream files = Files.list(translogLocation)) { + long totalFiles = files.filter(f -> f.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)).count(); + assertEquals(totalFiles, 501L); + } + } + + public void testAsyncTranslogDurabilityRestrictionsThroughIdxTemplates() throws Exception { + logger.info("Starting up cluster manager with cluster.remote_store.index.restrict.async-durability set to true"); + String cm1 = internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build() + ); + internalCluster().startDataOnlyNode(); + ensureStableCluster(2); + assertThrows( + IllegalArgumentException.class, + () -> internalCluster().client() + .admin() + .indices() + .preparePutTemplate("test") + .setPatterns(Arrays.asList("test*")) + .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), "async")) + .get() + ); + logger.info("Starting up another cluster manager with cluster.remote_store.index.restrict.async-durability set to false"); + internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), false).build() + ); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(cm1)); + ensureStableCluster(2); + assertAcked( + internalCluster().client() + .admin() + .indices() + .preparePutTemplate("test") + .setPatterns(Arrays.asList("test*")) + .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), "async")) + .get() + ); + } + + public void testCloseIndexWithNoOpSyncAndFlushForSyncTranslog() throws InterruptedException { + internalCluster().startNodes(3); + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "5s")) + .get(); + Settings.Builder settings = Settings.builder() + .put(remoteStoreIndexSettings(0, 10000L, -1)) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s"); + createIndex(INDEX_NAME, settings.build()); + CountDownLatch latch = new CountDownLatch(1); + new Thread(() -> { + if (randomBoolean()) { + for (int i = 0; i < randomIntBetween(1, 5); i++) { + indexSingleDoc(INDEX_NAME); + } + flushAndRefresh(INDEX_NAME); + } + // Index single doc to start the asyn io processor to run 
which will lead to 10s wait time before the next sync. + indexSingleDoc(INDEX_NAME); + // Reduce the latch for the main thread to flush after some sleep. + latch.countDown(); + // Index another doc and in this case the flush would have happened before the sync. + indexSingleDoc(INDEX_NAME); + }).start(); + // Wait for at least one doc to be ingested. + latch.await(); + // Sleep for some time for the next doc to be present in lucene buffer. If flush happens first before the doc #2 + // gets indexed, then it goes into the happy case where the close index happens successfully. + Thread.sleep(1000); + // Flush so that the subsequent sync or flushes are no-op. + flush(INDEX_NAME); + // Closing the index involves translog.sync and shard.flush which are now no-op. + client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); + Thread.sleep(10000); + ensureGreen(INDEX_NAME); + } + + public void testCloseIndexWithNoOpSyncAndFlushForAsyncTranslog() throws InterruptedException { + internalCluster().startNodes(3); + Settings.Builder settings = Settings.builder() + .put(remoteStoreIndexSettings(0, 10000L, -1)) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Durability.ASYNC) + .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "10s"); + createIndex(INDEX_NAME, settings.build()); + CountDownLatch latch = new CountDownLatch(1); + new Thread(() -> { + // Index some docs to start the async io processor to run which will lead to 10s wait time before the next sync. + indexSingleDoc(INDEX_NAME); + indexSingleDoc(INDEX_NAME); + indexSingleDoc(INDEX_NAME); + // Reduce the latch for the main thread to flush after some sleep. + latch.countDown(); + }).start(); + // Wait for at least one doc to be ingested. + latch.await(); + // Flush so that the subsequent sync or flushes are no-op. + flush(INDEX_NAME); + // Closing the index involves translog.sync and shard.flush which are now no-op.
+ client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); + Thread.sleep(10000); + ensureGreen(INDEX_NAME); + } + + public void testSuccessfulShallowV1SnapshotPostIndexClose() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "0ms")); + + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + logger.info("Create shallow snapshot setting enabled repo"); + String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; + Path shallowSnapshotRepoPath = randomRepoPath(); + Settings.Builder settings = Settings.builder() + .put("location", shallowSnapshotRepoPath) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE); + createRepository(shallowSnapshotRepoName, "fs", settings); + + for (int i = 0; i < 3; i++) { + indexBulk(INDEX_NAME, 1); + } + flushAndRefresh(INDEX_NAME); + + logger.info("Verify shallow snapshot created before close"); + final String snapshot1 = "snapshot1"; + SnapshotInfo snapshotInfo1 = internalCluster().client() + .admin() + .cluster() + .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot1) + .setIndices(INDEX_NAME) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + + assertEquals(SnapshotState.SUCCESS, snapshotInfo1.state()); + assertTrue(snapshotInfo1.successfulShards() > 0); + assertEquals(0, snapshotInfo1.failedShards()); + + for (int i = 0; i < 3; i++) { + indexBulk(INDEX_NAME, 1); + } + + // close index + client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); + Thread.sleep(1000); + logger.info("Verify shallow snapshot created after close"); + final String snapshot2 = "snapshot2"; + + SnapshotInfo snapshotInfo2 = internalCluster().client() + .admin() + .cluster() + .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot2) + .setIndices(INDEX_NAME) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + + assertEquals(SnapshotState.SUCCESS, snapshotInfo2.state()); + assertTrue(snapshotInfo2.successfulShards() > 0); + assertEquals(0, snapshotInfo2.failedShards()); + + // delete the index + cluster().wipeIndices(INDEX_NAME); + // try restoring the snapshot + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(shallowSnapshotRepoName, snapshot2) + .setWaitForCompletion(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + ensureGreen(INDEX_NAME); + flushAndRefresh(INDEX_NAME); + assertBusy(() -> { assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), 6); }); + } +} diff --git a/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedBlobContainer.java b/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedBlobContainer.java new file mode 100644 index 0000000000000..e795882a5a707 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedBlobContainer.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be 
licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore.mocks; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.InputStreamWithMetadata; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.remotestore.multipart.mocks.MockFsAsyncBlobContainer; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; + +public class MockFsMetadataSupportedBlobContainer extends MockFsAsyncBlobContainer { + + private static String CHECKPOINT_FILE_DATA_KEY = "ckp-data"; + + public MockFsMetadataSupportedBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, boolean triggerDataIntegrityFailure) { + super(blobStore, blobPath, path, triggerDataIntegrityFailure); + } + + @Override + public void asyncBlobUpload(WriteContext writeContext, ActionListener completionListener) throws IOException { + // If the upload writeContext have a non-null metadata, we store the metadata content as translog.ckp file. + if (writeContext.getMetadata() != null) { + String base64String = writeContext.getMetadata().get(CHECKPOINT_FILE_DATA_KEY); + byte[] decodedBytes = Base64.getDecoder().decode(base64String); + ByteArrayInputStream inputStream = new ByteArrayInputStream(decodedBytes); + int length = decodedBytes.length; + String ckpFileName = getCheckpointFileName(writeContext.getFileName()); + writeBlob(ckpFileName, inputStream, length, true); + } + super.asyncBlobUpload(writeContext, completionListener); + } + + // This is utility to get the translog.ckp file name for a given translog.tlog file. + private String getCheckpointFileName(String translogFileName) { + if (!translogFileName.endsWith(".tlog")) { + throw new IllegalArgumentException("Invalid translog file name format: " + translogFileName); + } + + int dotIndex = translogFileName.lastIndexOf('.'); + String baseName = translogFileName.substring(0, dotIndex); + return baseName + ".ckp"; + } + + public static String convertToBase64(InputStream inputStream) throws IOException { + try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { + byte[] buffer = new byte[128]; + int bytesRead; + int totalBytesRead = 0; + + while ((bytesRead = inputStream.read(buffer)) != -1) { + byteArrayOutputStream.write(buffer, 0, bytesRead); + totalBytesRead += bytesRead; + if (totalBytesRead > 1024) { + // We enforce a limit of 1KB on the size of the checkpoint file. + throw new AssertionError("Input stream exceeds 1KB limit"); + } + } + + byte[] bytes = byteArrayOutputStream.toByteArray(); + return Base64.getEncoder().encodeToString(bytes); + } + } + + // during readBlobWithMetadata call we separately download translog.ckp file and return it as metadata. 
+ @Override + public InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException { + String ckpFileName = getCheckpointFileName(blobName); + InputStream inputStream = readBlob(blobName); + try (InputStream ckpInputStream = readBlob(ckpFileName)) { + String ckpString = convertToBase64(ckpInputStream); + Map metadata = new HashMap<>(); + metadata.put(CHECKPOINT_FILE_DATA_KEY, ckpString); + return new InputStreamWithMetadata(inputStream, metadata); + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedBlobStore.java b/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedBlobStore.java new file mode 100644 index 0000000000000..76d4f383697a8 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedBlobStore.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore.mocks; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.fs.FsBlobStore; + +import java.io.IOException; +import java.nio.file.Path; + +public class MockFsMetadataSupportedBlobStore extends FsBlobStore { + + private final boolean triggerDataIntegrityFailure; + + public MockFsMetadataSupportedBlobStore(int bufferSizeInBytes, Path path, boolean readonly, boolean triggerDataIntegrityFailure) + throws IOException { + super(bufferSizeInBytes, path, readonly); + this.triggerDataIntegrityFailure = triggerDataIntegrityFailure; + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + try { + return new MockFsMetadataSupportedBlobContainer(this, path, buildAndCreate(path), triggerDataIntegrityFailure); + } catch (IOException ex) { + throw new OpenSearchException("failed to create blob container", ex); + } + } + + // Make MockFs metadata supported + @Override + public boolean isBlobMetadataEnabled() { + return true; + } + +} diff --git a/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedRepository.java b/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedRepository.java new file mode 100644 index 0000000000000..a62556b3aa152 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedRepository.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore.mocks; + +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.fs.ReloadableFsRepository; + +public class MockFsMetadataSupportedRepository extends ReloadableFsRepository { + + public static Setting TRIGGER_DATA_INTEGRITY_FAILURE = Setting.boolSetting( + "mock_fs_repository.trigger_data_integrity_failure", + false + ); + + private final boolean triggerDataIntegrityFailure; + + public MockFsMetadataSupportedRepository( + RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); + triggerDataIntegrityFailure = TRIGGER_DATA_INTEGRITY_FAILURE.get(metadata.settings()); + } + + @Override + protected BlobStore createBlobStore() throws Exception { + FsBlobStore fsBlobStore = (FsBlobStore) super.createBlobStore(); + return new MockFsMetadataSupportedBlobStore( + fsBlobStore.bufferSizeInBytes(), + fsBlobStore.path(), + isReadOnly(), + triggerDataIntegrityFailure + ); + } +} diff --git a/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedRepositoryPlugin.java b/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedRepositoryPlugin.java new file mode 100644 index 0000000000000..2ea1acb0b2eb3 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/remotestore/mocks/MockFsMetadataSupportedRepositoryPlugin.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore.mocks; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.RepositoryPlugin; +import org.opensearch.repositories.Repository; + +import java.util.Collections; +import java.util.Map; + +public class MockFsMetadataSupportedRepositoryPlugin extends Plugin implements RepositoryPlugin { + + public static final String TYPE_MD = "fs_metadata_supported_repository"; + + @Override + public Map getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + "fs_metadata_supported_repository", + metadata -> new MockFsMetadataSupportedRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java b/test/framework/src/main/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java similarity index 100% rename from server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java rename to test/framework/src/main/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java b/test/framework/src/main/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java similarity index 100% rename from server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java rename to test/framework/src/main/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsRepository.java b/test/framework/src/main/java/org/opensearch/remotestore/multipart/mocks/MockFsRepository.java similarity index 100% rename from server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsRepository.java rename to test/framework/src/main/java/org/opensearch/remotestore/multipart/mocks/MockFsRepository.java diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsRepositoryPlugin.java b/test/framework/src/main/java/org/opensearch/remotestore/multipart/mocks/MockFsRepositoryPlugin.java similarity index 100% rename from server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsRepositoryPlugin.java rename to test/framework/src/main/java/org/opensearch/remotestore/multipart/mocks/MockFsRepositoryPlugin.java diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 45a4402d71dab..e69b5984bce8d 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -399,11 +399,11 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { protected static final String REMOTE_BACKED_STORAGE_REPOSITORY_NAME = "test-remote-store-repo"; - private static Boolean prefixModeVerificationEnable; + protected static Boolean 
prefixModeVerificationEnable; - private static Boolean translogPathFixedPrefix; + protected static Boolean translogPathFixedPrefix; - private static Boolean segmentsPathFixedPrefix; + protected static Boolean segmentsPathFixedPrefix; protected static Boolean snapshotShardPathFixedPrefix; From 8ee5eebc3728a19487c0604d9828f92770fcd7ec Mon Sep 17 00:00:00 2001 From: Kaushal Kumar Date: Fri, 14 Mar 2025 11:54:08 -0700 Subject: [PATCH 072/550] Add search backpressure service check for query group tasks (#17576) * add validation check for SBP service Signed-off-by: Kaushal Kumar * add SBP should not handle the task tracking UT Signed-off-by: Kaushal Kumar * add CHANGELOG entry Signed-off-by: Kaushal Kumar * fix broken UT Signed-off-by: Kaushal Kumar * address comments Signed-off-by: Kaushal Kumar --------- Signed-off-by: Kaushal Kumar --- CHANGELOG.md | 1 + .../org/opensearch/wlm/QueryGroupService.java | 4 ++-- .../opensearch/wlm/QueryGroupServiceTests.java | 15 +++++++++++++-- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d5abd09d63d50..90b0131de2509 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Add highlighting for wildcard search on `match_only_text` field ([#17101](https://github.com/opensearch-project/OpenSearch/pull/17101)) - Fix illegal argument exception when creating a PIT ([#16781](https://github.com/opensearch-project/OpenSearch/pull/16781)) +- Fix NPE in node stats due to QueryGroupTasks ([#17576](https://github.com/opensearch-project/OpenSearch/pull/17576)) ### Security diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupService.java b/server/src/main/java/org/opensearch/wlm/QueryGroupService.java index 14002a2b38134..4451b3e7b62f4 100644 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupService.java +++ b/server/src/main/java/org/opensearch/wlm/QueryGroupService.java @@ -331,7 +331,7 @@ public Set getDeletedQueryGroups() { public boolean shouldSBPHandle(Task t) { QueryGroupTask task = (QueryGroupTask) t; boolean isInvalidQueryGroupTask = true; - if (!task.getQueryGroupId().equals(QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get())) { + if (task.isQueryGroupSet() && !QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get().equals(task.getQueryGroupId())) { isInvalidQueryGroupTask = activeQueryGroups.stream() .noneMatch(queryGroup -> queryGroup.get_id().equals(task.getQueryGroupId())); } @@ -340,7 +340,7 @@ public boolean shouldSBPHandle(Task t) { @Override public void onTaskCompleted(Task task) { - if (!(task instanceof QueryGroupTask)) { + if (!(task instanceof QueryGroupTask) || !((QueryGroupTask) task).isQueryGroupSet()) { return; } final QueryGroupTask queryGroupTask = (QueryGroupTask) task; diff --git a/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java b/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java index f22759ce968aa..579d65846f69b 100644 --- a/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java +++ b/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java @@ -395,7 +395,7 @@ public void testRejectIfNeeded_whenFeatureIsNotEnabled() { } public void testOnTaskCompleted() { - Task task = createMockTaskWithResourceStats(SearchTask.class, 100, 200, 0, 12); + Task task = new SearchTask(12, "", "", () -> "", null, 
null); mockThreadPool = new TestThreadPool("queryGroupServiceTests"); mockThreadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, "testId"); QueryGroupState queryGroupState = new QueryGroupState(); @@ -442,7 +442,7 @@ public void testOnTaskCompleted() { } public void testShouldSBPHandle() { - QueryGroupTask task = createMockTaskWithResourceStats(SearchTask.class, 100, 200, 0, 12); + SearchTask task = createMockTaskWithResourceStats(SearchTask.class, 100, 200, 0, 12); QueryGroupState queryGroupState = new QueryGroupState(); Set activeQueryGroups = new HashSet<>(); mockQueryGroupStateMap.put("testId", queryGroupState); @@ -464,6 +464,8 @@ public void testShouldSBPHandle() { mockThreadPool = new TestThreadPool("queryGroupServiceTests"); mockThreadPool.getThreadContext() .putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get()); + // we haven't set the queryGroupId yet SBP should still track the task for cancellation + assertTrue(queryGroupService.shouldSBPHandle(task)); task.setQueryGroupId(mockThreadPool.getThreadContext()); assertTrue(queryGroupService.shouldSBPHandle(task)); @@ -490,6 +492,15 @@ public void testShouldSBPHandle() { ); assertTrue(queryGroupService.shouldSBPHandle(task)); + mockThreadPool.shutdownNow(); + + // test the case when SBP should not track the task + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + task = new SearchTask(1, "", "test", () -> "", null, null); + mockThreadPool = new TestThreadPool("queryGroupServiceTests"); + mockThreadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, "testId"); + task.setQueryGroupId(mockThreadPool.getThreadContext()); + assertFalse(queryGroupService.shouldSBPHandle(task)); } private static Set getActiveQueryGroups( From 444df2cf548b1378009a1dd9ea932de4b0157caf Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Sun, 16 Mar 2025 11:09:30 -0400 Subject: [PATCH 073/550] Update tj-actions/changed-files usage from the Github Action checks since it has been compromised (#17597) Signed-off-by: Andriy Redko --- .github/workflows/gradle-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 577ab0c79535b..4b79bd8310c76 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -24,7 +24,7 @@ jobs: - uses: actions/checkout@v4 - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v45 + uses: tj-actions/changed-files@v46.0.1 with: files_ignore: | release-notes/*.md From f224a6d769e446ed6dcb58bd8aa10dd992404103 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Mon, 17 Mar 2025 10:50:09 +0530 Subject: [PATCH 074/550] Give more time for relocation to complete (#17590) Signed-off-by: Gaurav Bafna --- .../opensearch/remotemigration/RemotePrimaryRelocationIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java index c32d3520e83cf..4cf624ec8da3e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java @@ -272,7 +272,7 @@ public void 
testMixedModeRelocation_FailInFinalize() throws Exception { failFinalize.set(false); client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, docRepNode, remoteNode)).execute().actionGet(); - waitForRelocation(); + waitForRelocation(TimeValue.timeValueSeconds(90)); asyncIndexingService.stopIndexing(); client().admin() From e27cf58335ee9cf0f8b1a5811128732ed4ce6bcb Mon Sep 17 00:00:00 2001 From: mayank sharma Date: Mon, 17 Mar 2025 14:27:22 +0530 Subject: [PATCH 075/550] Added warm settings and updated nomenclature to differentiate between current tiering implementation for warm and upcoming hot implementation (#17490) Signed-off-by: Mayank Sharma --- CHANGELOG-3.0.md | 1 + .../WarmIndexSegmentReplicationIT.java | 2 +- .../opensearch/remotestore/WritableWarmIT.java | 16 ++++++++++------ .../org/opensearch/action/ActionModule.java | 4 ++-- .../admin/indices/tiering/TieringUtils.java | 17 ++++++++--------- .../cluster/routing/OperationRouting.java | 2 +- .../opensearch/cluster/routing/RoutingPool.java | 7 +++++-- .../allocator/LocalShardsBalancer.java | 4 ++-- .../allocator/RemoteShardsBalancer.java | 4 ++-- .../common/settings/FeatureFlagSettings.java | 2 +- .../common/settings/IndexScopedSettings.java | 5 +++-- .../opensearch/common/util/FeatureFlags.java | 12 ++++++++---- .../java/org/opensearch/index/IndexModule.java | 4 +++- .../java/org/opensearch/index/IndexService.java | 4 ++-- .../org/opensearch/index/IndexSettings.java | 16 ++++++++-------- .../index/engine/NRTReplicationEngine.java | 4 ++-- .../org/opensearch/index/shard/IndexShard.java | 4 ++-- .../replication/SegmentReplicationTarget.java | 2 +- .../TransportHotToWarmTieringActionTests.java | 2 +- .../cluster/routing/OperationRoutingTests.java | 4 ++-- .../ShardsTieringAllocationTests.java | 7 +++++-- .../TieringAllocationBaseTestCase.java | 2 ++ 22 files changed, 72 insertions(+), 53 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 5a3b2ce3a273e..05bc03ab28b83 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -25,6 +25,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) - [Star Tree] [Search] Resolving keyword & numeric bucket aggregation with metric aggregation using star-tree ([#17165](https://github.com/opensearch-project/OpenSearch/pull/17165)) - Added error handling support for the pull-based ingestion ([#17427](https://github.com/opensearch-project/OpenSearch/pull/17427)) +- Added Warm index setting and Updated nomenclature to differentiate between hot and warm tiering implementation ([#17490](https://github.com/opensearch-project/OpenSearch/pull/17490)) ### Dependencies diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java index d7f1c2209f798..a50ec7a0d0fa1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java @@ -150,7 +150,7 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Settings featureFlagSettings() { Settings.Builder featureSettings = Settings.builder(); - 
featureSettings.put(FeatureFlags.TIERED_REMOTE_INDEX, true); + featureSettings.put(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, true); return featureSettings.build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java index 88c9ae436e85f..9050b7eff008d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java @@ -63,7 +63,7 @@ protected boolean addMockIndexStorePlugin() { @Override protected Settings featureFlagSettings() { Settings.Builder featureSettings = Settings.builder(); - featureSettings.put(FeatureFlags.TIERED_REMOTE_INDEX, true); + featureSettings.put(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, true); return featureSettings.build(); } @@ -77,7 +77,11 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testWritableWarmFeatureFlagDisabled() { - Settings clusterSettings = Settings.builder().put(super.nodeSettings(0)).put(FeatureFlags.TIERED_REMOTE_INDEX, false).build(); + Settings clusterSettings = Settings.builder() + .put(super.nodeSettings(0)) + .put(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, false) + .build(); + InternalTestCluster internalTestCluster = internalCluster(); internalTestCluster.startClusterManagerOnlyNode(clusterSettings); internalTestCluster.startDataAndSearchNodes(1); @@ -85,7 +89,7 @@ public void testWritableWarmFeatureFlagDisabled() { Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL.name()) + .put(IndexModule.IS_WARM_INDEX_SETTING.getKey(), false) .build(); try { @@ -94,7 +98,7 @@ public void testWritableWarmFeatureFlagDisabled() { } catch (SettingsException ex) { assertEquals( "unknown setting [" - + IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey() + + IndexModule.IS_WARM_INDEX_SETTING.getKey() + "] please check that any required plugins are installed, or check the " + "breaking changes documentation for removed settings", ex.getMessage() @@ -109,7 +113,7 @@ public void testWritableWarmBasic() throws Exception { Settings settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL.name()) + .put(IndexModule.IS_WARM_INDEX_SETTING.getKey(), true) .build(); assertAcked(client().admin().indices().prepareCreate(INDEX_NAME).setSettings(settings).get()); @@ -119,7 +123,7 @@ public void testWritableWarmBasic() throws Exception { .getIndex(new GetIndexRequest().indices(INDEX_NAME).includeDefaults(true)) .get(); Settings indexSettings = getIndexResponse.settings().get(INDEX_NAME); - assertEquals(IndexModule.DataLocalityType.PARTIAL.name(), indexSettings.get(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey())); + assertTrue(indexSettings.getAsBoolean(IndexModule.IS_WARM_INDEX_SETTING.getKey(), false)); // Ingesting some docs indexBulk(INDEX_NAME, NUM_DOCS_IN_BULK); diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index baf15d25dbfd4..2a8a675c6fcea 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ 
b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -654,7 +654,7 @@ public void reg actions.register(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class); actions.register(CloneSnapshotAction.INSTANCE, TransportCloneSnapshotAction.class); actions.register(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class); - if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX)) { + if (FeatureFlags.isEnabled(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG)) { actions.register(HotToWarmTieringAction.INSTANCE, TransportHotToWarmTieringAction.class); } actions.register(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class); @@ -996,7 +996,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestNodeAttrsAction()); registerHandler.accept(new RestRepositoriesAction()); registerHandler.accept(new RestSnapshotAction()); - if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX)) { + if (FeatureFlags.isEnabled(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG)) { registerHandler.accept(new RestWarmTieringAction()); } registerHandler.accept(new RestTemplatesAction()); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUtils.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUtils.java index 46912de17f213..5c77f1091c1c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUtils.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUtils.java @@ -22,26 +22,25 @@ public class TieringUtils { /** * Checks if the specified shard is a partial shard by - * checking the INDEX_STORE_LOCALITY_SETTING for its index. - * see {@link #isPartialIndex(IndexMetadata)} + * checking the WARM_INDEX_ENABLED_SETTING for its index. + * see {@link #isWarmIndex(IndexMetadata)} (IndexMetadata)} * @param shard ShardRouting object representing the shard * @param allocation RoutingAllocation object representing the allocation * @return true if the shard is a partial shard, false otherwise */ public static boolean isPartialShard(ShardRouting shard, RoutingAllocation allocation) { IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shard.index()); - return isPartialIndex(indexMetadata); + return isWarmIndex(indexMetadata); } /** - * Checks if the specified index is a partial index by - * checking the INDEX_STORE_LOCALITY_SETTING for the index. + * Checks if the specified index is a warm index by + * checking the WARM_INDEX_ENABLED_SETTING for the index. 
* * @param indexMetadata the metadata of the index - * @return true if the index is a partial index, false otherwise + * @return true if the index is a warm index, false otherwise */ - public static boolean isPartialIndex(final IndexMetadata indexMetadata) { - return IndexModule.DataLocalityType.PARTIAL.name() - .equals(indexMetadata.getSettings().get(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey())); + public static boolean isWarmIndex(final IndexMetadata indexMetadata) { + return indexMetadata.getSettings().getAsBoolean(IndexModule.IS_WARM_INDEX_SETTING.getKey(), false); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index eac6f41acde4c..9f23ba3a01539 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -256,7 +256,7 @@ public GroupShardsIterator searchShards( preference = Preference.PRIMARY.type(); } - if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX) + if (FeatureFlags.isEnabled(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG) && IndexModule.DataLocalityType.PARTIAL.name() .equals(indexMetadataForShard.getSettings().get(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey())) && (preference == null || preference.isEmpty())) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java index 647e993339476..53788c6559ee5 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java @@ -13,7 +13,7 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.common.util.FeatureFlags; -import static org.opensearch.action.admin.indices.tiering.TieringUtils.isPartialIndex; +import static org.opensearch.action.admin.indices.tiering.TieringUtils.isWarmIndex; /** * {@link RoutingPool} defines the different node types based on the assigned capabilities. The methods @@ -62,6 +62,9 @@ public static RoutingPool getShardPool(ShardRouting shard, RoutingAllocation all */ public static RoutingPool getIndexPool(IndexMetadata indexMetadata) { return indexMetadata.isRemoteSnapshot() - || (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX) && isPartialIndex(indexMetadata)) ? REMOTE_CAPABLE : LOCAL_ONLY; + || (FeatureFlags.isEnabled(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG) && isWarmIndex(indexMetadata)) + ? 
REMOTE_CAPABLE + : LOCAL_ONLY; + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 7f6a7790d1db0..f6e3e94e9b9a6 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -561,7 +561,7 @@ private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) { */ private boolean canShardBeSkipped(ShardRouting shardRouting) { return (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shardRouting, allocation)) - && !(FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX) && isPartialShard(shardRouting, allocation))); + && !(FeatureFlags.isEnabled(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG) && isPartialShard(shardRouting, allocation))); } /** @@ -771,7 +771,7 @@ private Map buildModelFromAssigned() assert rn.nodeId().equals(shard.currentNodeId()); /* we skip relocating shards here since we expect an initializing shard with the same id coming in */ if ((RoutingPool.LOCAL_ONLY.equals(RoutingPool.getShardPool(shard, allocation)) - || (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX) && isPartialShard(shard, allocation))) + || (FeatureFlags.isEnabled(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG) && isPartialShard(shard, allocation))) && shard.state() != RELOCATING) { node.addShard(shard); ++totalShardCount; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java index e0543b396728e..5bd663826ea5f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java @@ -33,7 +33,7 @@ import java.util.Queue; import java.util.Set; -import static org.opensearch.action.admin.indices.tiering.TieringUtils.isPartialIndex; +import static org.opensearch.action.admin.indices.tiering.TieringUtils.isWarmIndex; /** * A {@link RemoteShardsBalancer} used by the {@link BalancedShardsAllocator} to perform allocation operations @@ -348,7 +348,7 @@ private void unassignIgnoredRemoteShards(RoutingAllocation routingAllocation) { // to re-fetch any shard blocks from the repository. 
if (shard.primary()) { if (RecoverySource.Type.SNAPSHOT.equals(shard.recoverySource().getType()) == false - && isPartialIndex(allocation.metadata().getIndexSafe(shard.index())) == false) { + && isWarmIndex(allocation.metadata().getIndexSafe(shard.index())) == false) { unassignedShard = shard.updateUnassigned(shard.unassignedInfo(), RecoverySource.EmptyStoreRecoverySource.INSTANCE); } } diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index b1d13b1ae8693..ba35a8bd1133a 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -33,7 +33,7 @@ protected FeatureFlagSettings( FeatureFlags.EXTENSIONS_SETTING, FeatureFlags.TELEMETRY_SETTING, FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING, - FeatureFlags.TIERED_REMOTE_INDEX_SETTING, + FeatureFlags.WRITABLE_WARM_INDEX_SETTING, FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, FeatureFlags.STAR_TREE_INDEX_SETTING, FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 12bee5cd14f57..14cd7479866d2 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -291,8 +291,9 @@ public final class IndexScopedSettings extends AbstractScopedSettings { * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}. */ public static final Map> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( - FeatureFlags.TIERED_REMOTE_INDEX, - List.of(IndexModule.INDEX_STORE_LOCALITY_SETTING, IndexModule.INDEX_TIERING_STATE), + FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, + // TODO: Create a separate feature flag for hot tiering index state. + List.of(IndexModule.INDEX_STORE_LOCALITY_SETTING, IndexModule.INDEX_TIERING_STATE, IndexModule.IS_WARM_INDEX_SETTING), FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, List.of(IndexMetadata.INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING) ); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index f0b26d562c52b..4ff81cf0c1c96 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -51,10 +51,10 @@ public class FeatureFlags { public static final String DATETIME_FORMATTER_CACHING = "opensearch.experimental.optimization.datetime_formatter_caching.enabled"; /** - * Gates the functionality of remote index having the capability to move across different tiers + * Gates the functionality of warm index having the capability to store data remotely. * Once the feature is ready for release, this feature flag can be removed. */ - public static final String TIERED_REMOTE_INDEX = "opensearch.experimental.feature.tiered_remote_index.enabled"; + public static final String WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG = "opensearch.experimental.feature.writable_warm_index.enabled"; /** * Gates the functionality of background task execution. 
@@ -79,7 +79,11 @@ public class FeatureFlags { Property.NodeScope ); - public static final Setting TIERED_REMOTE_INDEX_SETTING = Setting.boolSetting(TIERED_REMOTE_INDEX, false, Property.NodeScope); + public static final Setting WRITABLE_WARM_INDEX_SETTING = Setting.boolSetting( + WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, + false, + Property.NodeScope + ); public static final Setting READER_WRITER_SPLIT_EXPERIMENTAL_SETTING = Setting.boolSetting( READER_WRITER_SPLIT_EXPERIMENTAL, @@ -128,7 +132,7 @@ public class FeatureFlags { EXTENSIONS_SETTING, TELEMETRY_SETTING, DATETIME_FORMATTER_CACHING_SETTING, - TIERED_REMOTE_INDEX_SETTING, + WRITABLE_WARM_INDEX_SETTING, STAR_TREE_INDEX_SETTING, APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 7016ddb8e59b8..961b77ac20c5e 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -145,7 +145,7 @@ public final class IndexModule { ); /** - * Index setting which used to determine how the data is cached locally fully or partially + * Index setting which used to determine how the data is cached locally fully or partially. */ public static final Setting INDEX_STORE_LOCALITY_SETTING = new Setting<>( "index.store.data_locality", @@ -155,6 +155,8 @@ public final class IndexModule { Property.NodeScope ); + public static final Setting IS_WARM_INDEX_SETTING = Setting.boolSetting("index.warm", false, Property.IndexScope); + public static final Setting INDEX_RECOVERY_TYPE_SETTING = new Setting<>( "index.recovery.type", "", diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index e265ce3590121..0696058e86f08 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -652,9 +652,9 @@ protected void closeInternal() { } Directory directory = null; - if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX_SETTING) && + if (FeatureFlags.isEnabled(FeatureFlags.WRITABLE_WARM_INDEX_SETTING) && // TODO : Need to remove this check after support for hot indices is added in Composite Directory - this.indexSettings.isStoreLocalityPartial()) { + this.indexSettings.isWarmIndex()) { Directory localDirectory = directoryFactory.newDirectory(this.indexSettings, path); directory = new CompositeDirectory(localDirectory, remoteDirectory, fileCache); } else { diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 554e99764c1a1..38604ffd8bf8f 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -790,7 +790,8 @@ public static IndexMergePolicy fromString(String text) { private final int numberOfShards; private final ReplicationType replicationType; private volatile boolean isRemoteStoreEnabled; - private final boolean isStoreLocalityPartial; + // For warm index we would partially store files in local. 
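The new composite-directory path above is gated twice: by the node-level experimental flag (`opensearch.experimental.feature.writable_warm_index.enabled`) and by the per-index `index.warm` setting. A minimal sketch of that gating, using illustrative stand-in types rather than the real directory factories:

```java
// Illustrative stand-ins for the directory types wired up in the IndexService diff above;
// these are not the real OpenSearch/Lucene classes.
class Directory {}

class CompositeDirectory extends Directory {
    CompositeDirectory(Directory local, Directory remote) {
        // a composite store caches remote segment data on local disk on demand (partial locality)
    }
}

public class WarmIndexGateSketch {
    // stand-in for the node-level flag opensearch.experimental.feature.writable_warm_index.enabled
    static final boolean WRITABLE_WARM_FLAG_ENABLED = true;

    static Directory chooseDirectory(boolean isWarmIndex, Directory local, Directory remote) {
        // the composite (partially local) store is used only when the feature flag AND index.warm are both set
        if (WRITABLE_WARM_FLAG_ENABLED && isWarmIndex) {
            return new CompositeDirectory(local, remote);
        }
        return local; // hot index: fully local store
    }

    public static void main(String[] args) {
        Directory chosen = chooseDirectory(true, new Directory(), new Directory());
        System.out.println(chosen.getClass().getSimpleName()); // prints CompositeDirectory
    }
}
```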
+ private final boolean isWarmIndex; private volatile TimeValue remoteTranslogUploadBufferInterval; private volatile String remoteStoreTranslogRepository; private volatile String remoteStoreRepository; @@ -994,10 +995,9 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null); replicationType = IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.get(settings); isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); - isStoreLocalityPartial = settings.get( - IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), - IndexModule.DataLocalityType.FULL.toString() - ).equalsIgnoreCase(IndexModule.DataLocalityType.PARTIAL.toString()); + + isWarmIndex = settings.getAsBoolean(IndexModule.IS_WARM_INDEX_SETTING.getKey(), false); + remoteStoreTranslogRepository = settings.get(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY); remoteTranslogUploadBufferInterval = INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); @@ -1372,10 +1372,10 @@ public String getRemoteStoreTranslogRepository() { } /** - * Returns true if the store locality is partial + * Returns true if the index is writable warm index and has partial store locality. */ - public boolean isStoreLocalityPartial() { - return isStoreLocalityPartial; + public boolean isWarmIndex() { + return isWarmIndex; } /** diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 7f3010ff0937a..80e24fa0c5a7e 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -370,7 +370,7 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { ensureOpen(); // Skip flushing for indices with partial locality (warm indices) // For these indices, we don't need to commit as we will sync from the remote store on re-open - if (engineConfig.getIndexSettings().isStoreLocalityPartial()) { + if (engineConfig.getIndexSettings().isWarmIndex()) { return; } // readLock is held here to wait/block any concurrent close that acquires the writeLock. 
@@ -447,7 +447,7 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { latestSegmentInfos.changed(); } try { - if (engineConfig.getIndexSettings().isStoreLocalityPartial() == false) { + if (engineConfig.getIndexSettings().isWarmIndex() == false) { commitSegmentInfos(latestSegmentInfos); } } catch (IOException e) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 173268997895e..f861dbdab360b 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -5142,7 +5142,7 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runn } else { storeDirectory = store.directory(); } - if (indexSettings.isStoreLocalityPartial() == false) { + if (indexSettings.isWarmIndex() == false) { copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal, onFileSync); } @@ -5160,7 +5160,7 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runn } } assert Arrays.stream(store.directory().listAll()).filter(f -> f.startsWith(IndexFileNames.SEGMENTS)).findAny().isEmpty() - || indexSettings.isStoreLocalityPartial() : "There should not be any segments file in the dir"; + || indexSettings.isWarmIndex() : "There should not be any segments file in the dir"; store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); } syncSegmentSuccess = true; diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 6922ade22b714..550be9fb12965 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -206,7 +206,7 @@ private List getFiles(CheckpointInfoResponse checkpointInfo) // Return an empty list for warm indices, In this case, replica shards don't require downloading files from remote storage // as replicas will sync all files from remote in case of failure. 
- if (indexShard.indexSettings().isStoreLocalityPartial()) { + if (indexShard.indexSettings().isWarmIndex()) { return Collections.emptyList(); } final Store.RecoveryDiff diff = Store.segmentReplicationDiff(checkpointInfo.getMetadataMap(), indexShard.getSegmentMetadataMap()); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java index 10273366af804..ec0c1a8ebe3b9 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java @@ -44,7 +44,7 @@ public class TransportHotToWarmTieringActionTests extends OpenSearchIntegTestCas @Override protected Settings featureFlagSettings() { Settings.Builder featureSettings = Settings.builder(); - featureSettings.put(FeatureFlags.TIERED_REMOTE_INDEX, true); + featureSettings.put(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, true); return featureSettings.build(); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index 4263e1aa347dc..8cfdcce45c523 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -1060,7 +1060,7 @@ public void testSearchableSnapshotPrimaryDefault() throws Exception { @SuppressForbidden(reason = "feature flag overrides") public void testPartialIndexPrimaryDefault() throws Exception { - System.setProperty(FeatureFlags.TIERED_REMOTE_INDEX, "true"); + System.setProperty(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, "true"); final int numIndices = 1; final int numShards = 2; final int numReplicas = 2; @@ -1116,7 +1116,7 @@ public void testPartialIndexPrimaryDefault() throws Exception { } finally { IOUtils.close(clusterService); terminate(threadPool); - System.setProperty(FeatureFlags.TIERED_REMOTE_INDEX, "false"); + System.setProperty(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, "false"); } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsTieringAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsTieringAllocationTests.java index 8d45ebd2781b1..765d88f7af360 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsTieringAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsTieringAllocationTests.java @@ -29,7 +29,7 @@ public class ShardsTieringAllocationTests extends TieringAllocationBaseTestCase @Before public void setup() { - FeatureFlagSetter.set(FeatureFlags.TIERED_REMOTE_INDEX); + FeatureFlagSetter.set(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG); } public void testShardsInLocalPool() { @@ -107,7 +107,10 @@ public void testShardsWithTiering() { public void testShardPoolForPartialIndices() { String index = "test-index"; IndexMetadata indexMetadata = IndexMetadata.builder(index) - .settings(settings(Version.CURRENT).put(INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL.name())) + .settings( + settings(Version.CURRENT).put(INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL.name()) + .put(IndexModule.IS_WARM_INDEX_SETTING.getKey(), true) + ) 
.numberOfShards(PRIMARIES) .numberOfReplicas(REPLICAS) .build(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/TieringAllocationBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/TieringAllocationBaseTestCase.java index aba6fe74e0634..e90c959dc0b18 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/TieringAllocationBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/TieringAllocationBaseTestCase.java @@ -13,6 +13,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexModule; import static org.opensearch.index.IndexModule.INDEX_STORE_LOCALITY_SETTING; import static org.opensearch.index.IndexModule.INDEX_TIERING_STATE; @@ -37,6 +38,7 @@ public ClusterState updateIndexMetadataForTiering( .put(settings) .put(settings) .put(INDEX_TIERING_STATE.getKey(), tieringState) + .put(IndexModule.IS_WARM_INDEX_SETTING.getKey(), true) .put(INDEX_STORE_LOCALITY_SETTING.getKey(), dataLocality) ) ); From 6c0a95b9e1658e3ecb7cabd0cde183c40902f144 Mon Sep 17 00:00:00 2001 From: expani1729 <110471048+expani@users.noreply.github.com> Date: Mon, 17 Mar 2025 08:38:05 -0700 Subject: [PATCH 076/550] Removing references to 2.20.0 (#17605) Signed-off-by: expani --- .ci/bwcVersions | 1 - libs/core/src/main/java/org/opensearch/Version.java | 1 - 2 files changed, 2 deletions(-) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 2167875966fb2..e43e4c0fc40a6 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -45,4 +45,3 @@ BWC_VERSION: - "2.19.0" - "2.19.1" - "2.19.2" - - "2.20.0" diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 35bd1f2051b04..1fd759494a19a 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -116,7 +116,6 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_2_19_1 = new Version(2190199, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_2_19_2 = new Version(2190299, org.apache.lucene.util.Version.LUCENE_9_12_1); - public static final Version V_2_20_0 = new Version(2200099, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_10_1_0); public static final Version CURRENT = V_3_0_0; From 116699864d30238a08f0919c8706a83b0607aa14 Mon Sep 17 00:00:00 2001 From: Varun Bharadwaj Date: Mon, 17 Mar 2025 13:42:30 -0700 Subject: [PATCH 077/550] [Pull-based Ingestion] Add support for dynamically updating ingestion error handling strategy with minor fixes (#17565) * Fix global checkpoint for p2p segrep in ingestion mode Signed-off-by: Varun Bharadwaj * Support updating ingestion error strategy Signed-off-by: Varun Bharadwaj * Handle race condition on calling flush before poller is initialized Signed-off-by: Varun Bharadwaj --------- Signed-off-by: Varun Bharadwaj --- .../plugin/kafka/IngestFromKafkaIT.java | 30 ++++----- .../plugin/kafka/KafkaIngestionBaseIT.java | 31 +++++++++ .../plugin/kafka/RemoteStoreKafkaIT.java | 66 +++++++++++++++---- .../cluster/metadata/IndexMetadata.java | 6 +- .../index/engine/IngestionEngine.java | 29 +++++++- 
.../opensearch/index/shard/IndexShard.java | 16 ++++- .../BlockIngestionErrorStrategy.java | 4 +- .../pollingingest/DefaultStreamPoller.java | 22 +++++-- .../DropIngestionErrorStrategy.java | 4 +- .../pollingingest/IngestionErrorStrategy.java | 4 +- .../MessageProcessorRunnable.java | 49 ++++++++++---- .../indices/pollingingest/StreamPoller.java | 7 ++ .../DefaultStreamPollerTests.java | 9 ++- 13 files changed, 216 insertions(+), 61 deletions(-) diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java index 6fe670d4d5b62..f890d913ad8cf 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java @@ -19,6 +19,7 @@ import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.plugins.PluginInfo; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.transport.client.Requests; import org.junit.Assert; import java.util.List; @@ -56,27 +57,14 @@ public void testPluginsAreInstalled() { public void testKafkaIngestion() { produceData("1", "name1", "24"); produceData("2", "name2", "20"); - - createIndex( - "test", - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("ingestion_source.type", "kafka") - .put("ingestion_source.pointer.init.reset", "earliest") - .put("ingestion_source.param.topic", "test") - .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) - .put("index.replication.type", "SEGMENT") - .build(), - "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" - ); + createIndexWithDefaultSettings(1, 0); RangeQueryBuilder query = new RangeQueryBuilder("age").gte(21); await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { - refresh("test"); - SearchResponse response = client().prepareSearch("test").setQuery(query).get(); + refresh(indexName); + SearchResponse response = client().prepareSearch(indexName).setQuery(query).get(); assertThat(response.getHits().getTotalHits().value(), is(1L)); - PollingIngestStats stats = client().admin().indices().prepareStats("test").get().getIndex("test").getShards()[0] + PollingIngestStats stats = client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()[0] .getPollingIngestStats(); assertNotNull(stats); assertThat(stats.getMessageProcessorStats().getTotalProcessedCount(), is(2L)); @@ -135,10 +123,16 @@ public void testKafkaIngestion_RewindByOffset() { ); RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); - await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + await().atMost(1, TimeUnit.MINUTES).untilAsserted(() -> { refresh("test_rewind_by_offset"); SearchResponse response = client().prepareSearch("test_rewind_by_offset").setQuery(query).get(); assertThat(response.getHits().getTotalHits().value(), is(1L)); }); } + + public void testCloseIndex() throws Exception { + createIndexWithDefaultSettings(1, 0); + ensureGreen(indexName); + client().admin().indices().close(Requests.closeIndexRequest(indexName)).get(); + } } diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java 
b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java index 087bc9786872f..e3250fc806679 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java @@ -15,6 +15,8 @@ import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.serialization.StringSerializer; import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -25,6 +27,7 @@ import java.util.List; import java.util.Locale; import java.util.Properties; +import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import org.testcontainers.containers.KafkaContainer; @@ -108,4 +111,32 @@ protected void waitForSearchableDocs(long docCount, List nodes) throws E } }, 1, TimeUnit.MINUTES); } + + protected void waitForState(Callable checkState) throws Exception { + assertBusy(() -> { + if (checkState.call() == false) { + fail("Provided state requirements not met"); + } + }, 1, TimeUnit.MINUTES); + } + + protected String getSettings(String indexName, String setting) { + return client().admin().indices().prepareGetSettings(indexName).get().getSetting(indexName, setting); + } + + protected void createIndexWithDefaultSettings(int numShards, int numReplicas) { + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "earliest") + .put("ingestion_source.param.topic", topicName) + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("index.replication.type", "SEGMENT") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); + } } diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java index a9f818a9ca825..ecd7b72bc8349 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java @@ -16,6 +16,7 @@ import org.opensearch.index.query.RangeQueryBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.transport.client.Requests; import java.nio.file.Path; import java.util.Arrays; @@ -46,20 +47,7 @@ public void testSegmentReplicationWithRemoteStore() throws Exception { internalCluster().startClusterManagerOnlyNode(); final String nodeA = internalCluster().startDataOnlyNode(); - - createIndex( - indexName, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put("ingestion_source.type", "kafka") - .put("ingestion_source.pointer.init.reset", "earliest") - .put("ingestion_source.param.topic", topicName) - .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) - .put("index.replication.type", "SEGMENT") - .build(), 
- mapping - ); + createIndexWithDefaultSettings(1, 1); ensureYellowAndNoInitializingShards(indexName); final String nodeB = internalCluster().startDataOnlyNode(); @@ -117,6 +105,56 @@ public void testSegmentReplicationWithRemoteStore() throws Exception { waitForSearchableDocs(6, Arrays.asList(nodeB, nodeC)); } + public void testCloseIndex() throws Exception { + produceData("1", "name1", "24"); + produceData("2", "name2", "20"); + internalCluster().startClusterManagerOnlyNode(); + final String nodeA = internalCluster().startDataOnlyNode(); + final String nodeB = internalCluster().startDataOnlyNode(); + + createIndexWithDefaultSettings(1, 1); + ensureGreen(indexName); + waitForSearchableDocs(2, Arrays.asList(nodeA, nodeB)); + client().admin().indices().close(Requests.closeIndexRequest(indexName)).get(); + } + + public void testErrorStrategy() throws Exception { + produceData("1", "name1", "25"); + // malformed message + produceData("2", "", ""); + produceData("3", "name3", "25"); + + internalCluster().startClusterManagerOnlyNode(); + final String node = internalCluster().startDataOnlyNode(); + + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.error_strategy", "block") + .put("ingestion_source.pointer.init.reset", "earliest") + .put("ingestion_source.param.topic", topicName) + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("index.replication.type", "SEGMENT") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); + + ensureGreen(indexName); + waitForState(() -> "block".equalsIgnoreCase(getSettings(indexName, "index.ingestion_source.error_strategy"))); + waitForSearchableDocs(1, Arrays.asList(node)); + + client().admin() + .indices() + .prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put("ingestion_source.error_strategy", "drop")) + .get(); + waitForState(() -> "drop".equalsIgnoreCase(getSettings(indexName, "index.ingestion_source.error_strategy"))); + waitForSearchableDocs(2, Arrays.asList(node)); + } + private void verifyRemoteStoreEnabled(String node) { GetSettingsResponse settingsResponse = client(node).admin().indices().prepareGetSettings(indexName).get(); String remoteStoreEnabled = settingsResponse.getIndexToSettings().get(indexName).get("index.remote_store.enabled"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index e9bd3b74404b1..f70f136fc9a54 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -771,13 +771,17 @@ public Iterator> settings() { Property.Final ); + /** + * Defines the error strategy for pull-based ingestion. 
+ */ public static final String SETTING_INGESTION_SOURCE_ERROR_STRATEGY = "index.ingestion_source.error_strategy"; public static final Setting INGESTION_SOURCE_ERROR_STRATEGY_SETTING = new Setting<>( SETTING_INGESTION_SOURCE_ERROR_STRATEGY, IngestionErrorStrategy.ErrorStrategy.DROP.name(), IngestionErrorStrategy.ErrorStrategy::parseFromString, (errorStrategy) -> {}, - Property.IndexScope + Property.IndexScope, + Property.Dynamic ); public static final Setting.AffixSetting INGESTION_SOURCE_PARAMS_SETTING = Setting.prefixKeySetting( diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index b919e15b56211..b35873845165a 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -57,7 +57,7 @@ public IngestionEngine(EngineConfig engineConfig, IngestionConsumerFactory inges super(engineConfig); this.ingestionConsumerFactory = Objects.requireNonNull(ingestionConsumerFactory); this.documentMapperForType = engineConfig.getDocumentMapperForTypeSupplier().get(); - + registerDynamicIndexSettingsHandlers(); } /** @@ -215,8 +215,14 @@ protected void commitIndexWriter(final IndexWriter writer, final String translog commitData.put(HISTORY_UUID_KEY, historyUUID); commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(softDeletesPolicy.getMinRetainedSeqNo())); - // ingestion engine needs to record batch start pointer - commitData.put(StreamPoller.BATCH_START, streamPoller.getBatchStartPointer().asString()); + /* + * Ingestion engine needs to record batch start pointer. + * Batch start pointer can be null at index creation time, if flush is called before the stream + * poller has been completely initialized. + */ + if (streamPoller.getBatchStartPointer() != null) { + commitData.put(StreamPoller.BATCH_START, streamPoller.getBatchStartPointer().asString()); + } final String currentForceMergeUUID = forceMergeUUID; if (currentForceMergeUUID != null) { commitData.put(FORCE_MERGE_UUID_KEY, currentForceMergeUUID); @@ -304,4 +310,21 @@ protected Map commitDataAsMap() { public PollingIngestStats pollingIngestStats() { return streamPoller.getStats(); } + + private void registerDynamicIndexSettingsHandlers() { + engineConfig.getIndexSettings() + .getScopedSettings() + .addSettingsUpdateConsumer(IndexMetadata.INGESTION_SOURCE_ERROR_STRATEGY_SETTING, this::updateErrorHandlingStrategy); + } + + /** + * Handler for updating ingestion error strategy in the stream poller on dynamic index settings update. 
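Marking the error-strategy setting as `Dynamic` and registering it via `addSettingsUpdateConsumer` means the strategy can be swapped while the poller is running; the poller and message processor then read it through volatile fields. A minimal, self-contained sketch of that hand-off — the class names (`PollerSketch`, `DropStrategy`, `BlockStrategy`) are illustrative, not the OpenSearch classes:

```java
import java.util.function.Consumer;

// Illustrative stand-ins for IngestionErrorStrategy and DefaultStreamPoller from this patch.
interface ErrorStrategy {
    boolean shouldIgnoreError(Exception e);
}

class DropStrategy implements ErrorStrategy {
    public boolean shouldIgnoreError(Exception e) { return true; }  // skip the failed message
}

class BlockStrategy implements ErrorStrategy {
    public boolean shouldIgnoreError(Exception e) { return false; } // keep retrying / block
}

class PollerSketch {
    // volatile so the polling thread observes the swap made by the settings-update thread
    private volatile ErrorStrategy errorStrategy = new DropStrategy();

    void updateErrorStrategy(ErrorStrategy strategy) { this.errorStrategy = strategy; }

    ErrorStrategy errorStrategy() { return errorStrategy; }
}

public class DynamicErrorStrategyDemo {
    public static void main(String[] args) {
        PollerSketch poller = new PollerSketch();

        // stand-in for addSettingsUpdateConsumer(...): invoked whenever the dynamic index
        // setting ingestion_source.error_strategy changes
        Consumer<String> onSettingUpdate = value -> poller.updateErrorStrategy(
            "block".equalsIgnoreCase(value) ? new BlockStrategy() : new DropStrategy()
        );

        onSettingUpdate.accept("block"); // e.g. an update-settings call flipping the strategy
        System.out.println(poller.errorStrategy().getClass().getSimpleName()); // BlockStrategy
    }
}
```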
+ */ + private void updateErrorHandlingStrategy(IngestionErrorStrategy.ErrorStrategy errorStrategy) { + IngestionErrorStrategy updatedIngestionErrorStrategy = IngestionErrorStrategy.create( + errorStrategy, + engineConfig.getIndexSettings().getIndexMetadata().getIngestionSource().getType() + ); + streamPoller.updateErrorStrategy(updatedIngestionErrorStrategy); + } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f861dbdab360b..5174a179cdc6b 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -238,6 +238,7 @@ import static org.opensearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO; +import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_MIGRATING_SEEDED; import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_MIGRATING_UNSEEDED; @@ -451,7 +452,7 @@ public IndexShard( aId, indexSettings, primaryTerm, - UNASSIGNED_SEQ_NO, + getInitialGlobalCheckpointForShard(indexSettings), globalCheckpointListeners::globalCheckpointUpdated, threadPool::absoluteTimeInMillis, (retentionLeases, listener) -> retentionLeaseSyncer.sync(shardId, aId, getPendingPrimaryTerm(), retentionLeases, listener), @@ -499,6 +500,19 @@ public boolean shouldCache(Query query) { this.segmentReplicationStatsProvider = segmentReplicationStatsProvider; } + /** + * By default, UNASSIGNED_SEQ_NO is used as the initial global checkpoint for new shard initialization. Ingestion + * source does not track sequence numbers explicitly and hence defaults to NO_OPS_PERFORMED for compatibility. 
+ * + */ + private long getInitialGlobalCheckpointForShard(IndexSettings indexSettings) { + if (indexSettings.getIndexMetadata().useIngestionSource()) { + return NO_OPS_PERFORMED; + } + + return UNASSIGNED_SEQ_NO; + } + public ThreadPool getThreadPool() { return this.threadPool; } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java index d0febd0909be2..e342c48632494 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java @@ -30,7 +30,7 @@ public void handleError(Throwable e, ErrorStage stage) { } @Override - public boolean shouldPauseIngestion(Throwable e, ErrorStage stage) { - return true; + public boolean shouldIgnoreError(Throwable e, ErrorStage stage) { + return false; } } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java index 6fe010504f1a8..5d0988c3ca0eb 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -42,6 +42,7 @@ public class DefaultStreamPoller implements StreamPoller { private volatile boolean started; private volatile boolean closed; private volatile boolean paused; + private volatile IngestionErrorStrategy errorStrategy; private IngestionShardConsumer consumer; @@ -67,8 +68,6 @@ public class DefaultStreamPoller implements StreamPoller { @Nullable private IngestionShardPointer maxPersistedPointer; - private IngestionErrorStrategy errorStrategy; - public DefaultStreamPoller( IngestionShardPointer startPointer, Set persistedPointers, @@ -231,14 +230,14 @@ protected void startPoll() { logger.error("Error in polling the shard {}: {}", consumer.getShardId(), e); errorStrategy.handleError(e, IngestionErrorStrategy.ErrorStage.POLLING); - if (errorStrategy.shouldPauseIngestion(e, IngestionErrorStrategy.ErrorStage.POLLING)) { - // Blocking error encountered. Pause poller to stop processing remaining updates. - pause(); - } else { + if (errorStrategy.shouldIgnoreError(e, IngestionErrorStrategy.ErrorStage.POLLING)) { // Advance the batch start pointer to ignore the error and continue from next record batchStartPointer = lastSuccessfulPointer == null ? consumer.nextPointer(batchStartPointer) : consumer.nextPointer(lastSuccessfulPointer); + } else { + // Blocking error encountered. Pause poller to stop processing remaining updates. 
+ pause(); } } } @@ -332,4 +331,15 @@ public PollingIngestStats getStats() { public State getState() { return state; } + + @Override + public IngestionErrorStrategy getErrorStrategy() { + return this.errorStrategy; + } + + @Override + public void updateErrorStrategy(IngestionErrorStrategy errorStrategy) { + this.errorStrategy = errorStrategy; + processorRunnable.setErrorStrategy(errorStrategy); + } } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java index 4598bf1248cfd..610718d816230 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java @@ -30,8 +30,8 @@ public void handleError(Throwable e, ErrorStage stage) { } @Override - public boolean shouldPauseIngestion(Throwable e, ErrorStage stage) { - return false; + public boolean shouldIgnoreError(Throwable e, ErrorStage stage) { + return true; } } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java index a6e992a460cc1..930fb69e1534f 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java @@ -25,9 +25,9 @@ public interface IngestionErrorStrategy { void handleError(Throwable e, ErrorStage stage); /** - * Indicates if ingestion must be paused, blocking further writes. + * Indicates if the error should be ignored. */ - boolean shouldPauseIngestion(Throwable e, ErrorStage stage); + boolean shouldIgnoreError(Throwable e, ErrorStage stage); static IngestionErrorStrategy create(ErrorStrategy errorStrategy, String ingestionSource) { switch (errorStrategy) { diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java index 0ac791e60de5a..2066f348243b8 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java @@ -46,15 +46,15 @@ */ public class MessageProcessorRunnable implements Runnable { private static final Logger logger = LogManager.getLogger(MessageProcessorRunnable.class); + private static final String ID = "_id"; + private static final String OP_TYPE = "_op_type"; + private static final String SOURCE = "_source"; + private static final int WAIT_BEFORE_RETRY_DURATION_MS = 5000; + private volatile IngestionErrorStrategy errorStrategy; private final BlockingQueue> blockingQueue; private final MessageProcessor messageProcessor; private final CounterMetric stats = new CounterMetric(); - private IngestionErrorStrategy errorStrategy; - - private static final String ID = "_id"; - private static final String OP_TYPE = "_op_type"; - private static final String SOURCE = "_source"; /** * Constructor. @@ -223,32 +223,59 @@ private static BytesReference convertToBytes(Object object) throws IOException { return blockingQueue; } + /** + * Polls messages from the blocking queue and processes messages. 
If message processing fails, the failed message + * is retried indefinitely after a retry wait time, unless a DROP error policy is used to skip the failed message. + */ @Override public void run() { + IngestionShardConsumer.ReadResult readResult = null; + while (!(Thread.currentThread().isInterrupted())) { - IngestionShardConsumer.ReadResult result = null; try { - result = blockingQueue.poll(1000, TimeUnit.MILLISECONDS); + if (readResult == null) { + readResult = blockingQueue.poll(1000, TimeUnit.MILLISECONDS); + } } catch (InterruptedException e) { // TODO: add metric logger.debug("MessageProcessorRunnable poll interruptedException", e); Thread.currentThread().interrupt(); // Restore interrupt status } - if (result != null) { + if (readResult != null) { try { stats.inc(); - messageProcessor.process(result.getMessage(), result.getPointer()); + messageProcessor.process(readResult.getMessage(), readResult.getPointer()); + readResult = null; } catch (Exception e) { errorStrategy.handleError(e, IngestionErrorStrategy.ErrorStage.PROCESSING); - if (errorStrategy.shouldPauseIngestion(e, IngestionErrorStrategy.ErrorStage.PROCESSING)) { - Thread.currentThread().interrupt(); + if (errorStrategy.shouldIgnoreError(e, IngestionErrorStrategy.ErrorStage.PROCESSING)) { + readResult = null; + } else { + waitBeforeRetry(); } } } } } + private void waitBeforeRetry() { + try { + Thread.sleep(WAIT_BEFORE_RETRY_DURATION_MS); + } catch (InterruptedException e) { + logger.debug("MessageProcessor thread interrupted while waiting for retry", e); + Thread.currentThread().interrupt(); // Restore interrupt status + } + } + public CounterMetric getStats() { return stats; } + + public IngestionErrorStrategy getErrorStrategy() { + return this.errorStrategy; + } + + public void setErrorStrategy(IngestionErrorStrategy errorStrategy) { + this.errorStrategy = errorStrategy; + } } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java index 15e1745433df2..81e2bddfa687b 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java @@ -52,6 +52,13 @@ public interface StreamPoller extends Closeable { PollingIngestStats getStats(); + IngestionErrorStrategy getErrorStrategy(); + + /** + * Update the error strategy for the poller. 
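The reworked `run()` loop above keeps the failed `readResult` across iterations instead of polling a new message, so a blocking strategy retries the same message after a fixed wait while a drop strategy clears it and moves on. A small self-contained sketch of that control flow, with plain strings standing in for Kafka messages and a boolean standing in for the strategy:

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class RetryLoopSketch {

    static final boolean DROP_ON_ERROR = true; // true ~ DROP strategy, false ~ BLOCK strategy
    static final long RETRY_WAIT_MS = 100;     // the patch waits a fixed interval before retrying

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(16);
        queue.put("{\"_id\":\"1\",\"name\":\"bob\"}");
        queue.put("malformed");                     // fails to process
        queue.put("{\"_id\":\"3\",\"name\":\"alice\"}");

        String current = null;                      // the in-flight message, kept across retries
        while (!queue.isEmpty() || current != null) {
            if (current == null) {
                current = queue.poll(100, TimeUnit.MILLISECONDS);
                if (current == null) continue;
            }
            try {
                process(current);
                current = null;                     // success: move on to the next message
            } catch (RuntimeException e) {
                if (DROP_ON_ERROR) {
                    current = null;                 // drop: skip the failed message
                } else {
                    Thread.sleep(RETRY_WAIT_MS);    // block: wait, then retry the same message
                }
            }
        }
    }

    static void process(String message) {
        if (!message.startsWith("{")) {
            throw new RuntimeException("cannot parse message: " + message);
        }
        System.out.println("indexed " + message);
    }
}
```

With the blocking behaviour a malformed message is retried until it can be processed or the setting is switched to `drop`, which is what the `testErrorStrategy` integration test above exercises.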
+ */ + void updateErrorStrategy(IngestionErrorStrategy errorStrategy); + /** * a state to indicate the current state of the poller */ diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java index 0f0f90f392242..11d130aef469b 100644 --- a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java +++ b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java @@ -54,7 +54,6 @@ public class DefaultStreamPollerTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); messages = new ArrayList<>(); - ; messages.add("{\"_id\":\"1\",\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8)); messages.add("{\"_id\":\"2\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); fakeConsumer = new FakeIngestionSource.FakeIngestionConsumer(messages, 0); @@ -346,4 +345,12 @@ public void testProcessingErrorWithBlockErrorIngestionStrategy() throws TimeoutE // the write to blockingQueue assertEquals(DefaultStreamPoller.State.POLLING, poller.getState()); } + + public void testUpdateErrorStrategy() { + assertTrue(poller.getErrorStrategy() instanceof DropIngestionErrorStrategy); + assertTrue(processorRunnable.getErrorStrategy() instanceof DropIngestionErrorStrategy); + poller.updateErrorStrategy(new BlockIngestionErrorStrategy("ingestion_source")); + assertTrue(poller.getErrorStrategy() instanceof BlockIngestionErrorStrategy); + assertTrue(processorRunnable.getErrorStrategy() instanceof BlockIngestionErrorStrategy); + } } From f58d846f35ebfecaf09ed5fb4b8f9fa1522af26c Mon Sep 17 00:00:00 2001 From: expani1729 <110471048+expani@users.noreply.github.com> Date: Mon, 17 Mar 2025 16:39:39 -0700 Subject: [PATCH 078/550] Add release notes for 3.0.0-alpha1 (#17616) * Add release notes for 3.0.0-alpha1 Signed-off-by: expani * Merged all change logs into 3.0.0-alpha1 release notes and made them empty Signed-off-by: expani * Remove header Signed-off-by: expani * Addressing comments Signed-off-by: expani --------- Signed-off-by: expani --- CHANGELOG-3.0.md | 70 --------- CHANGELOG.md | 32 ----- .../opensearch.release-notes-3.0.0-alpha1.md | 134 ++++++++++++++++++ 3 files changed, 134 insertions(+), 102 deletions(-) create mode 100644 release-notes/opensearch.release-notes-3.0.0-alpha1.md diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 05bc03ab28b83..f301c4669de96 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -5,86 +5,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 3.0] ### Added -- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) -- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) -- Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) -- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) -- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) -- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) -- [WLM] Add 
WLM support for search scroll API ([#16981](https://github.com/opensearch-project/OpenSearch/pull/16981)) -- Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) -- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) -- Add systemd configurations to strengthen OS core security ([#17107](https://github.com/opensearch-project/OpenSearch/pull/17107)) -- Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) -- Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) -- Add cluster and index level settings to limit the total primary shards per node and per index [#17295](https://github.com/opensearch-project/OpenSearch/pull/17295) -- Add execution_hint to cardinality aggregator request (#[17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) -- Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) -- Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) -- Added integ tests for systemd configs ([#17410](https://github.com/opensearch-project/OpenSearch/pull/17410)) -- Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) -- [Star Tree] [Search] Resolving keyword & numeric bucket aggregation with metric aggregation using star-tree ([#17165](https://github.com/opensearch-project/OpenSearch/pull/17165)) -- Added error handling support for the pull-based ingestion ([#17427](https://github.com/opensearch-project/OpenSearch/pull/17427)) -- Added Warm index setting and Updated nomenclature to differentiate between hot and warm tiering implementation ([#17490](https://github.com/opensearch-project/OpenSearch/pull/17490)) - ### Dependencies -- Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) -- Bump Apache HttpCore5/HttpClient5 dependencies from 5.2.5/5.3.1 to 5.3.1/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757)) -- Bumps `jetty` version from 9.4.55.v20240627 to 9.4.57.v20241219 -- Switch main/3.x to use JDK21 LTS version ([#17515](https://github.com/opensearch-project/OpenSearch/pull/17515)) ### Changed -- Changed locale provider from COMPAT to CLDR ([#14345](https://github.com/opensearch-project/OpenSearch/pull/14345)) -- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) -- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) -- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) -- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) -- 
Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) -- Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) -- Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) -- Use simpler matching logic for source fields when explicit field names (no wildcards or dot-paths) are specified ([#17160](https://github.com/opensearch-project/OpenSearch/pull/17160)) -- Refactor `:libs` module `bootstrap` package to eliminate top level split packages for JPMS support ([#17117](https://github.com/opensearch-project/OpenSearch/pull/17117)) -- Refactor the codebase to eliminate top level split packages for JPMS support ([#17153](https://github.com/opensearch-project/OpenSearch/pull/17153) -- Refactor `:server` module `org.apacge.lucene` package to eliminate top level split packages for JPMS support ([#17241](https://github.com/opensearch-project/OpenSearch/pull/17241)) -- Stop minimizing automata used for case-insensitive matches ([#17268](https://github.com/opensearch-project/OpenSearch/pull/17268)) -- Refactor the `:server` module `org.opensearch.client` to `org.opensearch.transport.client` to eliminate top level split packages for JPMS support ([#17272](https://github.com/opensearch-project/OpenSearch/pull/17272)) -- Use Lucene `BM25Similarity` as default since the `LegacyBM25Similarity` is marked as deprecated ([#17306](https://github.com/opensearch-project/OpenSearch/pull/17306)) -- Wildcard field index only 3gram of the input data [#17349](https://github.com/opensearch-project/OpenSearch/pull/17349) -- Use BC libraries to parse PEM files, increase key length, allow general use of known cryptographic binary extensions, remove unused BC dependencies ([#3420](https://github.com/opensearch-project/OpenSearch/pull/14912)) -- Add optional enum set read / write functionality to stream input / output ([#17556](https://github.com/opensearch-project/OpenSearch/pull/17556)) ### Deprecated ### Removed -- Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) -- Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) -- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) -- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) -- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) -- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) -- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) -- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) -- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) -- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) -- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants 
([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837)) -- Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) -- Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) -- Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) -- Remove `index.store.hybrid.mmap.extensions` setting in favor of `index.store.hybrid.nio.extensions` setting ([#9392](https://github.com/opensearch-project/OpenSearch/pull/9392)) -- Remove package org.opensearch.action.support.master ([#4856](https://github.com/opensearch-project/OpenSearch/issues/4856)) -- Remove transport-nio plugin ([#16887](https://github.com/opensearch-project/OpenSearch/issues/16887)) -- Remove deprecated 'gateway' settings used to defer cluster recovery ([#3117](https://github.com/opensearch-project/OpenSearch/issues/3117)) -- Remove FeatureFlags.PLUGGABLE_CACHE as the feature is no longer experimental ([#17344](https://github.com/opensearch-project/OpenSearch/pull/17344)) ### Fixed -- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) -- Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) -- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) -- Fix swapped field formats in nodes API where `total_indexing_buffer_in_bytes` and `total_indexing_buffer` values were reversed ([#17070](https://github.com/opensearch-project/OpenSearch/pull/17070)) -- Add HTTP/2 protocol support to HttpRequest.HttpVersion ([#17248](https://github.com/opensearch-project/OpenSearch/pull/17248)) -- Fix missing bucket in terms aggregation with missing value ([#17418](https://github.com/opensearch-project/OpenSearch/pull/17418)) -- Fix explain action on query rewrite ([#17286](https://github.com/opensearch-project/OpenSearch/pull/17286)) ### Security diff --git a/CHANGELOG.md b/CHANGELOG.md index 90b0131de2509..27a35ca711ed3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,48 +5,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- Latency and Memory allocation improvements to Multi Term Aggregation queries ([#14993](https://github.com/opensearch-project/OpenSearch/pull/14993)) -- Add logic in master service to optimize performance and retain detailed logging for critical cluster operations. 
([#14795](https://github.com/opensearch-project/OpenSearch/pull/14795)) -- Add Setting to adjust the primary constraint weights ([#16471](https://github.com/opensearch-project/OpenSearch/pull/16471)) -- Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) -- Added ability to retrieve value from DocValues in a flat_object filed([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) -- Improve performace of NumericTermAggregation by avoiding unnecessary sorting([#17252](https://github.com/opensearch-project/OpenSearch/pull/17252)) -- Fix Bug - Handle unsigned long in sorting order assertion of LongHashSet ([#17207](https://github.com/opensearch-project/OpenSearch/pull/17207)) -- Implemented computation of segment replication stats at shard level ([#17055](https://github.com/opensearch-project/OpenSearch/pull/17055)) -- [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) ### Dependencies -- Bump `org.awaitility:awaitility` from 4.2.0 to 4.3.0 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230), [#17439](https://github.com/opensearch-project/OpenSearch/pull/17439)) -- Bump `dnsjava:dnsjava` from 3.6.2 to 3.6.3 ([#17231](https://github.com/opensearch-project/OpenSearch/pull/17231)) -- Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17229](https://github.com/opensearch-project/OpenSearch/pull/17229)) -- Bump `org.jruby.joni:joni` from 2.2.1 to 2.2.3 ([#17136](https://github.com/opensearch-project/OpenSearch/pull/17136)) -- Bump `org.apache.ant:ant` from 1.10.14 to 1.10.15 ([#17288](https://github.com/opensearch-project/OpenSearch/pull/17288)) -- Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) -- Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) -- Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) -- Bump `com.netflix.nebula.ospackage-base` from 11.10.1 to 11.11.1 ([#17374](https://github.com/opensearch-project/OpenSearch/pull/17374)) -- Bump `ch.qos.logback:logback-classic` from 1.5.16 to 1.5.17 ([#17497](https://github.com/opensearch-project/OpenSearch/pull/17497)) -- Bump `software.amazon.awssdk` from 2.20.86 to 2.30.31 ([17396](https://github.com/opensearch-project/OpenSearch/pull/17396)) -- Bump `org.jruby.jcodings:jcodings` from 1.0.61 to 1.0.63 ([#17560](https://github.com/opensearch-project/OpenSearch/pull/17560)) -- Bump `com.azure:azure-storage-blob` from 12.28.1 to 12.29.1 ([#17562](https://github.com/opensearch-project/OpenSearch/pull/17562)) ### Changed -- Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) -- Increase force merge threads to 1/8th of cores [#17255](https://github.com/opensearch-project/OpenSearch/pull/17255) -- TieredSpilloverCache took-time threshold now guards heap tier as well as disk tier [#17190](https://github.com/opensearch-project/OpenSearch/pull/17190) ### Deprecated ### Removed ### Fixed -- Fix visit of inner query for FunctionScoreQueryBuilder 
([#16776](https://github.com/opensearch-project/OpenSearch/pull/16776)) -- Fix case insensitive and escaped query on wildcard ([#16827](https://github.com/opensearch-project/OpenSearch/pull/16827)) -- Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) -- Add highlighting for wildcard search on `match_only_text` field ([#17101](https://github.com/opensearch-project/OpenSearch/pull/17101)) -- Fix illegal argument exception when creating a PIT ([#16781](https://github.com/opensearch-project/OpenSearch/pull/16781)) -- Fix NPE in node stats due to QueryGroupTasks ([#17576](https://github.com/opensearch-project/OpenSearch/pull/17576)) ### Security diff --git a/release-notes/opensearch.release-notes-3.0.0-alpha1.md b/release-notes/opensearch.release-notes-3.0.0-alpha1.md new file mode 100644 index 0000000000000..405d28f755ac3 --- /dev/null +++ b/release-notes/opensearch.release-notes-3.0.0-alpha1.md @@ -0,0 +1,134 @@ +## 2025-03-17 Version 3.0.0-alpha1 Release Notes + +## [3.0.0] +### Breaking Changes +- Upgrade to Lucene 10.1.0 - PR ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) +- JDK21 as minimum supported Java runtime ([#10745](https://github.com/opensearch-project/OpenSearch/issues/10745)) +- Remove deprecated terms from Java API ([#5214](https://github.com/opensearch-project/OpenSearch/issues/5214)) +- JPMS Support (Eliminate top level split packages ) Phase-0 only ([#8110](https://github.com/opensearch-project/OpenSearch/issues/8110)) +- Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039)) +- Add ThreadContextPermission for markAsSystemContext and allow core to perform the method ([#15016](https://github.com/opensearch-project/OpenSearch/pull/15016)) +- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) +- Validation changes on the Bulk Index API like enforcing 512 byte _id size limit ([#6595](https://github.com/opensearch-project/OpenSearch/issues/6595)) +- Ability to use the node as coordinating node by passing node.roles as empty array ([#3412](https://github.com/opensearch-project/OpenSearch/issues/3412)) +- Treat Setting value with empty array string as empty array ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) +- Ensure Jackson default maximums introduced in 2.16.0 do not conflict with OpenSearch settings ([#11811](https://github.com/opensearch-project/OpenSearch/pull/11811)) +- Setting a maximum depth for nested queries ([#11670](https://github.com/opensearch-project/OpenSearch/pull/11670)) +- Fix interchanged formats of total_indexing_buffer_in_bytes and total_indexing_buffer ([#17070](https://github.com/opensearch-project/OpenSearch/pull/17070)) +- Cleanup deprecated thread pool settings ([#2595](https://github.com/opensearch-project/OpenSearch/issues/2595)) +- Replace "blacklist/whitelist" terminology in Java APIs ([#1683](https://github.com/opensearch-project/OpenSearch/issues/1683)) +- Remove deprecated methods from JodaCompatibleZonedDateTime which are called by scripts ([#3346](https://github.com/opensearch-project/OpenSearch/pull/3346)) +- List of deprecated code removal in 3.0- partially done ([#2773](https://github.com/opensearch-project/OpenSearch/issues/2773)) +- Remove mmap.extensions setting ([#9392](https://github.com/opensearch-project/OpenSearch/pull/9392)) +- Remove 
COMPAT locale provider ([#13988](https://github.com/opensearch-project/OpenSearch/pull/13988)) +- Remove transport-nio plugin ([#16887](https://github.com/opensearch-project/OpenSearch/issues/16887)) +- Deprecate CamelCase PathHierarchy tokenizer name ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) +- Rename Class ending with Plugin to Module under modules dir ([#4042](https://github.com/opensearch-project/OpenSearch/pull/4042)) + +### Added +- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) +- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) +- Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) +- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) +- [WLM] Add WLM support for search scroll API ([#16981](https://github.com/opensearch-project/OpenSearch/pull/16981)) +- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) +- Add systemd configurations to strengthen OS core security ([#17107](https://github.com/opensearch-project/OpenSearch/pull/17107)) +- Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) +- Added ConfigurationUtils to core for the ease of configuration parsing [#17223](https://github.com/opensearch-project/OpenSearch/pull/17223) +- Add cluster and index level settings to limit the total primary shards per node and per index [#17295](https://github.com/opensearch-project/OpenSearch/pull/17295) +- Add execution_hint to cardinality aggregator request ([#17312](https://github.com/opensearch-project/OpenSearch/pull/17312)) +- Arrow Flight RPC plugin with Flight server bootstrap logic and client for internode communication ([#16962](https://github.com/opensearch-project/OpenSearch/pull/16962)) +- Added offset management for the pull-based Ingestion ([#17354](https://github.com/opensearch-project/OpenSearch/pull/17354)) +- Added integ tests for systemd configs ([#17410](https://github.com/opensearch-project/OpenSearch/pull/17410)) +- Add filter function for AbstractQueryBuilder, BoolQueryBuilder, ConstantScoreQueryBuilder([#17409](https://github.com/opensearch-project/OpenSearch/pull/17409)) +- [Star Tree] [Search] Resolving keyword & numeric bucket aggregation with metric aggregation using star-tree ([#17165](https://github.com/opensearch-project/OpenSearch/pull/17165)) +- Added error handling support for the pull-based ingestion ([#17427](https://github.com/opensearch-project/OpenSearch/pull/17427)) +- Added Warm index setting and Updated nomenclature to differentiate between hot and warm tiering implementation ([#17490](https://github.com/opensearch-project/OpenSearch/pull/17490)) +- Latency and Memory allocation improvements to Multi Term Aggregation queries 
([#14993](https://github.com/opensearch-project/OpenSearch/pull/14993)) +- Add logic in master service to optimize performance and retain detailed logging for critical cluster operations. ([#14795](https://github.com/opensearch-project/OpenSearch/pull/14795)) +- Add Setting to adjust the primary constraint weights ([#16471](https://github.com/opensearch-project/OpenSearch/pull/16471)) +- Introduce a setting to disable download of full cluster state from remote on term mismatch ([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) +- Added ability to retrieve value from DocValues in a flat_object field ([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) +- Improve performance of NumericTermAggregation by avoiding unnecessary sorting ([#17252](https://github.com/opensearch-project/OpenSearch/pull/17252)) +- Fix Bug - Handle unsigned long in sorting order assertion of LongHashSet ([#17207](https://github.com/opensearch-project/OpenSearch/pull/17207)) +- Implemented computation of segment replication stats at shard level ([#17055](https://github.com/opensearch-project/OpenSearch/pull/17055)) +- [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) + + +### Dependencies +- Bump Apache HttpCore5/HttpClient5 dependencies from 5.2.5/5.3.1 to 5.3.1/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757)) +- Bumps `jetty` version from 9.4.55.v20240627 to 9.4.57.v20241219 ([#17395](https://github.com/opensearch-project/OpenSearch/pull/17395)) +- Switch main/3.x to use JDK21 LTS version ([#17515](https://github.com/opensearch-project/OpenSearch/pull/17515)) +- Bump `org.awaitility:awaitility` from 4.2.0 to 4.3.0 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230), [#17439](https://github.com/opensearch-project/OpenSearch/pull/17439)) +- Bump `dnsjava:dnsjava` from 3.6.2 to 3.6.3 ([#17231](https://github.com/opensearch-project/OpenSearch/pull/17231)) +- Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17229](https://github.com/opensearch-project/OpenSearch/pull/17229)) +- Bump `org.jruby.joni:joni` from 2.2.1 to 2.2.3 ([#17136](https://github.com/opensearch-project/OpenSearch/pull/17136)) +- Bump `org.apache.ant:ant` from 1.10.14 to 1.10.15 ([#17288](https://github.com/opensearch-project/OpenSearch/pull/17288)) +- Bump `reactor_netty` from 1.1.26 to 1.1.27 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322)) +- Bump `me.champeau.gradle.japicmp` from 0.4.5 to 0.4.6 ([#17375](https://github.com/opensearch-project/OpenSearch/pull/17375)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.37.1 to 2.52.0 ([#17379](https://github.com/opensearch-project/OpenSearch/pull/17379)) +- Bump `net.minidev:json-smart` from 2.5.1 to 2.5.2 ([#17378](https://github.com/opensearch-project/OpenSearch/pull/17378)) +- Bump `com.netflix.nebula.ospackage-base` from 11.10.1 to 11.11.1 ([#17374](https://github.com/opensearch-project/OpenSearch/pull/17374)) +- Bump `ch.qos.logback:logback-classic` from 1.5.16 to 1.5.17 ([#17497](https://github.com/opensearch-project/OpenSearch/pull/17497)) +- Bump `software.amazon.awssdk` from 2.20.86 to 2.30.31 ([#17396](https://github.com/opensearch-project/OpenSearch/pull/17396)) +- Bump `org.jruby.jcodings:jcodings` from 1.0.61 to 1.0.63 ([#17560](https://github.com/opensearch-project/OpenSearch/pull/17560)) +- Bump `com.azure:azure-storage-blob`
from 12.28.1 to 12.29.1 ([#17562](https://github.com/opensearch-project/OpenSearch/pull/17562)) + +### Changed +- Changed locale provider from COMPAT to CLDR ([#14345](https://github.com/opensearch-project/OpenSearch/pull/14345)) +- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) +- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) +- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) +- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) +- Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) +- Use simpler matching logic for source fields when explicit field names (no wildcards or dot-paths) are specified ([#17160](https://github.com/opensearch-project/OpenSearch/pull/17160)) +- Refactor `:libs` module `bootstrap` package to eliminate top level split packages for JPMS support ([#17117](https://github.com/opensearch-project/OpenSearch/pull/17117)) +- Refactor the codebase to eliminate top level split packages for JPMS support ([#17153](https://github.com/opensearch-project/OpenSearch/pull/17153)) +- Refactor `:server` module `org.apacge.lucene` package to eliminate top level split packages for JPMS support ([#17241](https://github.com/opensearch-project/OpenSearch/pull/17241)) +- Stop minimizing automata used for case-insensitive matches ([#17268](https://github.com/opensearch-project/OpenSearch/pull/17268)) +- Refactor the `:server` module `org.opensearch.client` to `org.opensearch.transport.client` to eliminate top level split packages for JPMS support ([#17272](https://github.com/opensearch-project/OpenSearch/pull/17272)) +- Use Lucene `BM25Similarity` as default since the `LegacyBM25Similarity` is marked as deprecated ([#17306](https://github.com/opensearch-project/OpenSearch/pull/17306)) +- Wildcard field index only 3gram of the input data [#17349](https://github.com/opensearch-project/OpenSearch/pull/17349) +- Use BC libraries to parse PEM files, increase key length, allow general use of known cryptographic binary extensions, remove unused BC dependencies ([#3420](https://github.com/opensearch-project/OpenSearch/pull/14912)) +- Add optional enum set read / write functionality to stream input / output ([#17556](https://github.com/opensearch-project/OpenSearch/pull/17556)) +- Convert transport-reactor-netty4 to use gradle version catalog [#17233](https://github.com/opensearch-project/OpenSearch/pull/17233) +- Increase force merge threads to 1/8th of cores [#17255](https://github.com/opensearch-project/OpenSearch/pull/17255) +- TieredSpilloverCache took-time threshold now guards heap tier as well as disk tier [#17190](https://github.com/opensearch-project/OpenSearch/pull/17190) + +### Deprecated + +### Removed +- Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) +- Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) +- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants
([#2768](https://github.com/opensearch-project/OpenSearch/pull/2768)) +- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) +- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) +- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) +- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) +- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) +- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) +- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) +- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants ([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837)) +- Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) +- Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) +- Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) +- Remove package org.opensearch.action.support.master ([#4856](https://github.com/opensearch-project/OpenSearch/issues/4856)) +- Remove deprecated 'gateway' settings used to defer cluster recovery ([#3117](https://github.com/opensearch-project/OpenSearch/issues/3117)) +- Remove FeatureFlags.PLUGGABLE_CACHE as the feature is no longer experimental ([#17344](https://github.com/opensearch-project/OpenSearch/pull/17344)) + +### Fixed +- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) +- Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) +- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) +- Add HTTP/2 protocol support to HttpRequest.HttpVersion ([#17248](https://github.com/opensearch-project/OpenSearch/pull/17248)) +- Fix missing bucket in terms aggregation with missing value ([#17418](https://github.com/opensearch-project/OpenSearch/pull/17418)) +- Fix explain action on query rewrite ([#17286](https://github.com/opensearch-project/OpenSearch/pull/17286)) +- Fix visit of inner query for FunctionScoreQueryBuilder ([#16776](https://github.com/opensearch-project/OpenSearch/pull/16776)) +- Fix case insensitive and escaped query on wildcard ([#16827](https://github.com/opensearch-project/OpenSearch/pull/16827)) +- Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) +- Add highlighting for wildcard search on `match_only_text` field ([#17101](https://github.com/opensearch-project/OpenSearch/pull/17101)) +- Fix illegal argument exception when creating a PIT ([#16781](https://github.com/opensearch-project/OpenSearch/pull/16781)) +- Fix NPE in node stats due to QueryGroupTasks ([#17576](https://github.com/opensearch-project/OpenSearch/pull/17576)) + From 1c86dd17b69e51e3934930de92a7adc52d715662 Mon Sep 17 00:00:00 2001 From: Vinay Krishna Pudyodu Date: Mon, 17 Mar 2025 23:18:49
-0700 Subject: [PATCH 079/550] Renaming the node role search to warm (#17573) * Renaming search node role to warm Signed-off-by: Vinay Krishna Pudyodu * Added Changelog Signed-off-by: Vinay Krishna Pudyodu * fixed failing tests Signed-off-by: Vinay Krishna Pudyodu * fixed PR comments Signed-off-by: Vinay Krishna Pudyodu --------- Signed-off-by: Vinay Krishna Pudyodu --- CHANGELOG.md | 2 +- .../admin/cluster/stats/ClusterStatsIT.java | 2 +- .../cluster/ClusterInfoServiceIT.java | 6 +- .../WarmIndexSegmentReplicationIT.java | 122 +++++++++--------- .../remotestore/WritableWarmIT.java | 4 +- .../snapshots/SearchableSnapshotIT.java | 100 +++++++------- .../opensearch/bootstrap/BootstrapChecks.java | 6 +- .../cluster/InternalClusterInfoService.java | 2 +- .../cluster/node/DiscoveryNode.java | 16 +-- .../cluster/node/DiscoveryNodeRole.java | 8 +- .../cluster/routing/RoutingPool.java | 2 +- .../org/opensearch/env/NodeEnvironment.java | 8 +- .../opensearch/env/NodeRepurposeCommand.java | 30 ++--- .../tiering/TieringRequestValidator.java | 16 +-- .../main/java/org/opensearch/node/Node.java | 18 +-- .../TransportHotToWarmTieringActionTests.java | 2 +- .../bootstrap/BootstrapChecksTests.java | 6 +- .../cluster/node/DiscoveryNodeTests.java | 27 ++-- .../RemoteShardsBalancerBaseTestCase.java | 10 +- .../decider/DiskThresholdDeciderTests.java | 14 +- .../TargetPoolAllocationDeciderTests.java | 2 +- .../structure/RoutingIteratorTests.java | 4 +- .../env/NodeRepurposeCommandTests.java | 44 +++---- .../tiering/TieringRequestValidatorTests.java | 30 ++--- .../java/org/opensearch/node/NodeTests.java | 22 ++-- .../snapshots/SnapshotResiliencyTests.java | 16 +-- .../opensearch/test/InternalTestCluster.java | 76 +++++------ .../java/org/opensearch/test/NodeRoles.java | 16 +-- 28 files changed, 305 insertions(+), 306 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27a35ca711ed3..33154c97ff5b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added - +- Renaming the node role search to warm ([#17573](https://github.com/opensearch-project/OpenSearch/pull/17573)) ### Dependencies ### Changed diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index 9058dc2f5b147..4dd5e7b74c96d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -896,7 +896,7 @@ private Map getExpectedCounts( expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), clusterManagerRoleCount); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), ingestRoleCount); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), remoteClusterClientRoleCount); - expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), searchRoleCount); + expectedCounts.put(DiscoveryNodeRole.WARM_ROLE.roleName(), searchRoleCount); expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, coordinatingOnlyCount); return expectedCounts; } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java index 35b8bdf3dafe5..3ba885812bcfc 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java @@ -195,7 +195,7 @@ public void testClusterInfoServiceCollectsInformation() { final Map nodeFileCacheStats = info.nodeFileCacheStats; assertNotNull(nodeFileCacheStats); - assertThat("file cache is empty on non search nodes", nodeFileCacheStats.size(), Matchers.equalTo(0)); + assertThat("file cache is empty on non warm nodes", nodeFileCacheStats.size(), Matchers.equalTo(0)); ClusterService clusterService = internalTestCluster.getInstance(ClusterService.class, internalTestCluster.getClusterManagerName()); ClusterState state = clusterService.state(); @@ -216,7 +216,7 @@ public void testClusterInfoServiceCollectsInformation() { public void testClusterInfoServiceCollectsFileCacheInformation() { internalCluster().startNodes(1); - internalCluster().ensureAtLeastNumSearchAndDataNodes(2); + internalCluster().ensureAtLeastNumWarmAndDataNodes(2); InternalTestCluster internalTestCluster = internalCluster(); // Get the cluster info service on the cluster-manager node @@ -229,7 +229,7 @@ public void testClusterInfoServiceCollectsFileCacheInformation() { assertNotNull("info should not be null", info); final Map nodeFileCacheStats = info.nodeFileCacheStats; assertNotNull(nodeFileCacheStats); - assertThat("file cache is enabled on both search nodes", nodeFileCacheStats.size(), Matchers.equalTo(2)); + assertThat("file cache is enabled on both warm nodes", nodeFileCacheStats.size(), Matchers.equalTo(2)); for (FileCacheStats fileCacheStats : nodeFileCacheStats.values()) { assertThat("file cache is non empty", fileCacheStats.getTotal().getBytes(), greaterThan(0L)); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java index a50ec7a0d0fa1..49f87c9243b05 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java @@ -177,7 +177,7 @@ public void teardown() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17526") public void testRestartPrimary_NoReplicas() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellow(INDEX_NAME); @@ -197,10 +197,10 @@ public void testRestartPrimary_NoReplicas() throws Exception { } public void testPrimaryStopped_ReplicaPromoted() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); @@ -229,7 +229,7 @@ public void testPrimaryStopped_ReplicaPromoted() throws Exception { assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); // start 
another node, index another doc and replicate. - String nodeC = internalCluster().startDataAndSearchNodes(1).get(0); + String nodeC = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); client().prepareIndex(INDEX_NAME).setId("4").setSource("baz", "baz").get(); refresh(INDEX_NAME); @@ -239,10 +239,10 @@ public void testPrimaryStopped_ReplicaPromoted() throws Exception { } public void testRestartPrimary() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); assertEquals(getNodeContainingPrimaryShard().getName(), primary); @@ -266,10 +266,10 @@ public void testRestartPrimary() throws Exception { public void testCancelPrimaryAllocation() throws Exception { // this test cancels allocation on the primary - promoting the new replica and recreating the former primary as a replica. - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final int initialDocCount = 1; @@ -296,8 +296,8 @@ public void testCancelPrimaryAllocation() throws Exception { } public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { - final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); - final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeA = internalCluster().startDataAndWarmNodes(1).get(0); + final String nodeB = internalCluster().startDataAndWarmNodes(1).get(0); final Settings settings = Settings.builder() .put(indexSettings()) .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(new ArrayList<>(CODECS) { @@ -340,8 +340,8 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { } public void testIndexReopenClose() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -374,7 +374,7 @@ public void testIndexReopenClose() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17526") public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); ensureGreen(INDEX_NAME); @@ -397,7 +397,7 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { .prepareUpdateSettings(INDEX_NAME) .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) ); - final String replicaNode = 
internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); @@ -420,9 +420,9 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { */ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17527") public void testReplicationPostDeleteAndForceMerge() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final int initialDocCount = scaledRandomIntBetween(1, 10); for (int i = 0; i < initialDocCount; i++) { @@ -480,8 +480,8 @@ public void testReplicationPostDeleteAndForceMerge() throws Exception { } public void testScrollWithConcurrentIndexAndSearch() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); final List> pendingIndexResponses = new ArrayList<>(); @@ -539,8 +539,8 @@ public void testMultipleShards() throws Exception { .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); - final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); - final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeA = internalCluster().startDataAndWarmNodes(1).get(0); + final String nodeB = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME, indexSettings); ensureGreen(INDEX_NAME); @@ -585,8 +585,8 @@ public void testReplicationAfterForceMergeOnPrimaryShardsOnly() throws Exception } private void performReplicationAfterForceMerge(boolean primaryOnly, int expectedSuccessfulShards) throws Exception { - final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); - final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeA = internalCluster().startDataAndWarmNodes(1).get(0); + final String nodeB = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -638,11 +638,11 @@ private void performReplicationAfterForceMerge(boolean primaryOnly, int expected public void testClosedIndices() { List nodes = new ArrayList<>(); // start 1st node so that it contains the primary - nodes.add(internalCluster().startDataAndSearchNodes(1).get(0)); + nodes.add(internalCluster().startDataAndWarmNodes(1).get(0)); createIndex(INDEX_NAME, super.indexSettings()); ensureYellowAndNoInitializingShards(INDEX_NAME); // start 2nd node so that it contains the replica - nodes.add(internalCluster().startDataAndSearchNodes(1).get(0)); + nodes.add(internalCluster().startDataAndWarmNodes(1).get(0)); ensureGreen(INDEX_NAME); logger.info("--> Close index"); @@ -657,7 +657,7 @@ public void testClosedIndices() { * @throws Exception when issue is encountered */ public void testNodeDropWithOngoingReplication() 
throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex( INDEX_NAME, Settings.builder() @@ -668,7 +668,7 @@ public void testNodeDropWithOngoingReplication() throws Exception { .build() ); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); // Get replica allocation id @@ -724,11 +724,11 @@ public void testNodeDropWithOngoingReplication() throws Exception { } public void testCancellation() throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); final SegmentReplicationSourceService segmentReplicationSourceService = internalCluster().getInstance( SegmentReplicationSourceService.class, @@ -776,8 +776,8 @@ public void testCancellation() throws Exception { @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testDeleteOperations() throws Exception { - final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); - final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeA = internalCluster().startDataAndWarmNodes(1).get(0); + final String nodeB = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -818,10 +818,10 @@ public void testDeleteOperations() throws Exception { } public void testUpdateOperations() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellow(INDEX_NAME); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final int initialDocCount = scaledRandomIntBetween(1, 5); @@ -870,9 +870,9 @@ public void testDropPrimaryDuringReplication() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replica_count) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME, settings); - final List dataNodes = internalCluster().startDataAndSearchNodes(6); + final List dataNodes = internalCluster().startDataAndWarmNodes(6); ensureGreen(INDEX_NAME); int initialDocCount = scaledRandomIntBetween(5, 10); @@ -896,7 +896,7 @@ public void testDropPrimaryDuringReplication() throws Exception { ensureYellow(INDEX_NAME); // start another replica. 
- dataNodes.add(internalCluster().startDataAndSearchNodes(1).get(0)); + dataNodes.add(internalCluster().startDataAndWarmNodes(1).get(0)); ensureGreen(INDEX_NAME); waitForSearchableDocs(initialDocCount, dataNodes); @@ -913,10 +913,10 @@ public void testDropPrimaryDuringReplication() throws Exception { @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testReplicaHasDiffFilesThanPrimary() throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME); @@ -970,10 +970,10 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17527") public void testPressureServiceStats() throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); int initialDocCount = scaledRandomIntBetween(10, 20); @@ -1036,7 +1036,7 @@ public void testPressureServiceStats() throws Exception { assertTrue(replicaStats.isEmpty()); // start another replica. - String replicaNode_2 = internalCluster().startDataAndSearchNodes(1).get(0); + String replicaNode_2 = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final IndexShard secondReplicaShard = getIndexShard(replicaNode_2, INDEX_NAME); final String second_replica_aid = secondReplicaShard.routingEntry().allocationId().getId(); @@ -1078,7 +1078,7 @@ private void assertAllocationIdsInReplicaShardStats(Set expected, Set nodes = new ArrayList<>(); - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); nodes.add(primaryNode); final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); createIndex(INDEX_NAME, settings); ensureGreen(INDEX_NAME); // start a replica node, initially will be empty with no shard assignment. - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); nodes.add(replicaNode); // index a doc. 
@@ -1215,7 +1215,7 @@ public void testPrimaryReceivesDocsDuringReplicaRecovery() throws Exception { } public void testIndexWhileRecoveringReplica() throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); assertAcked( prepareCreate(INDEX_NAME).setMapping( jsonBuilder().startObject() @@ -1239,7 +1239,7 @@ public void testIndexWhileRecoveringReplica() throws Exception { ) ); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); client().prepareIndex(INDEX_NAME) .setId("1") @@ -1290,13 +1290,13 @@ public void testIndexWhileRecoveringReplica() throws Exception { * Tests whether segment replication supports realtime get requests and reads and parses source from the translog to serve strong reads. */ public void testRealtimeGetRequestsSuccessful() { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime get assertAcked( prepareCreate(INDEX_NAME).setSettings(Settings.builder().put("index.refresh_interval", -1).put(indexSettings())) .addAlias(new Alias("alias")) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); @@ -1328,13 +1328,13 @@ public void testRealtimeGetRequestsSuccessful() { } public void testRealtimeGetRequestsUnsuccessful() { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); assertAcked( prepareCreate(INDEX_NAME).setSettings( Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) ).addAlias(new Alias("alias")) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); @@ -1362,14 +1362,14 @@ public void testRealtimeGetRequestsUnsuccessful() { * Tests whether segment replication supports realtime MultiGet requests and reads and parses source from the translog to serve strong reads. 
*/ public void testRealtimeMultiGetRequestsSuccessful() { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime multi get assertAcked( prepareCreate(INDEX_NAME).setSettings( Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) ).addAlias(new Alias("alias")) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); @@ -1414,13 +1414,13 @@ public void testRealtimeMultiGetRequestsSuccessful() { } public void testRealtimeMultiGetRequestsUnsuccessful() { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); assertAcked( prepareCreate(INDEX_NAME).setSettings( Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) ).addAlias(new Alias("alias")) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); @@ -1460,7 +1460,7 @@ public void testRealtimeMultiGetRequestsUnsuccessful() { * Tests whether segment replication supports realtime termvector requests and reads and parses source from the translog to serve strong reads. */ public void testRealtimeTermVectorRequestsSuccessful() throws IOException { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); XContentBuilder mapping = jsonBuilder().startObject() .startObject("properties") .startObject("field") @@ -1482,7 +1482,7 @@ public void testRealtimeTermVectorRequestsSuccessful() throws IOException { .putList("index.analysis.analyzer.tv_test.filter", "lowercase") ) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); @@ -1537,7 +1537,7 @@ public void testRealtimeTermVectorRequestsSuccessful() throws IOException { } public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); XContentBuilder mapping = jsonBuilder().startObject() .startObject("properties") .startObject("field") @@ -1561,7 +1561,7 @@ public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) ) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); final String routingOtherShard = routingKeyForShard(INDEX_NAME, 1); @@ -1607,15 +1607,15 @@ public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { public void testReplicaAlreadyAtCheckpoint() throws Exception { final List nodes = new 
ArrayList<>(); - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); nodes.add(primaryNode); final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); createIndex(INDEX_NAME, settings); ensureGreen(INDEX_NAME); // start a replica node, initially will be empty with no shard assignment. - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); nodes.add(replicaNode); - final String replicaNode2 = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode2 = internalCluster().startDataAndWarmNodes(1).get(0); assertAcked( client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java index 9050b7eff008d..f258822ddd426 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java @@ -84,7 +84,7 @@ public void testWritableWarmFeatureFlagDisabled() { InternalTestCluster internalTestCluster = internalCluster(); internalTestCluster.startClusterManagerOnlyNode(clusterSettings); - internalTestCluster.startDataAndSearchNodes(1); + internalTestCluster.startDataAndWarmNodes(1); Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) @@ -109,7 +109,7 @@ public void testWritableWarmFeatureFlagDisabled() { public void testWritableWarmBasic() throws Exception { InternalTestCluster internalTestCluster = internalCluster(); internalTestCluster.startClusterManagerOnlyNode(); - internalTestCluster.startDataAndSearchNodes(1); + internalTestCluster.startDataAndWarmNodes(1); Settings settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 278328d61f544..5cc4d0706888e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -107,7 +107,7 @@ private Settings.Builder chunkedRepositorySettings(long chunkSize) { /** * Tests a happy path scenario for searchable snapshots by creating 2 indices, * taking a snapshot, restoring them as searchable snapshots. - * Ensures availability of sufficient data nodes and search capable nodes. + * Ensures availability of sufficient data nodes and warm capable nodes. 
*/ public void testCreateSearchableSnapshot() throws Exception { final String snapshotName = "test-snap"; @@ -128,7 +128,7 @@ public void testCreateSearchableSnapshot() throws Exception { takeSnapshot(client, snapshotName, repoName, indexName1, indexName2); deleteIndicesAndEnsureGreen(client, indexName1, indexName2); - internalCluster().ensureAtLeastNumSearchNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); + internalCluster().ensureAtLeastNumWarmNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName1, restoredIndexName2); @@ -153,7 +153,7 @@ public void testSnapshottingSearchableSnapshots() throws Exception { deleteIndicesAndEnsureGreen(client, indexName); // restore the index as a searchable snapshot - internalCluster().ensureAtLeastNumSearchNodes(1); + internalCluster().ensureAtLeastNumWarmNodes(1); client.admin() .cluster() .prepareRestoreSnapshot(repoName, initSnapName) @@ -235,7 +235,7 @@ public void testCreateSearchableSnapshotWithDefaultChunks() throws Exception { Settings.Builder repositorySettings = chunkedRepositorySettings(2 << 23); - internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); createRepositoryWithSettings(repositorySettings, repoName); takeSnapshot(client, snapshotName, repoName, indexName); @@ -261,7 +261,7 @@ public void testCreateSearchableSnapshotWithSmallChunks() throws Exception { Settings.Builder repositorySettings = chunkedRepositorySettings(1000); - internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); createRepositoryWithSettings(repositorySettings, repoName); takeSnapshot(client, snapshotName, repoName, indexName); @@ -276,7 +276,7 @@ public void testCreateSearchableSnapshotWithSmallChunks() throws Exception { /** * Tests the functionality of remote shard allocation to * ensure it can assign remote shards to a node with local shards given it has the - * search role capabilities. + * warm role capabilities. 
*/ public void testSearchableSnapshotAllocationForLocalAndRemoteShardsOnSameNode() throws Exception { final int numReplicasIndex = randomIntBetween(1, 4); @@ -286,7 +286,7 @@ public void testSearchableSnapshotAllocationForLocalAndRemoteShardsOnSameNode() final String snapshotName = "test-snap"; final Client client = client(); - internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 100, indexName); createRepositoryWithSettings(null, repoName); takeSnapshot(client, snapshotName, repoName, indexName); @@ -306,21 +306,21 @@ public void testSearchableSnapshotAllocationFilterSettings() throws Exception { final String snapshotName = "test-snap"; final Client client = client(); - internalCluster().ensureAtLeastNumSearchAndDataNodes(numShardsIndex); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numShardsIndex); createIndexWithDocsAndEnsureGreen(numShardsIndex, 1, 100, indexName); createRepositoryWithSettings(null, repoName); takeSnapshot(client, snapshotName, repoName, indexName); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); - final Set searchNodes = StreamSupport.stream(clusterService().state().getNodes().spliterator(), false) - .filter(DiscoveryNode::isSearchNode) + final Set warmNodes = StreamSupport.stream(clusterService().state().getNodes().spliterator(), false) + .filter(DiscoveryNode::isWarmNode) .map(DiscoveryNode::getId) .collect(Collectors.toSet()); - for (int i = searchNodes.size(); i > 2; --i) { - String pickedNode = randomFrom(searchNodes); - searchNodes.remove(pickedNode); + for (int i = warmNodes.size(); i > 2; --i) { + String pickedNode = randomFrom(warmNodes); + warmNodes.remove(pickedNode); assertIndexAssignedToNodeOrNot(restoredIndexName, pickedNode, true); assertTrue( client.admin() @@ -357,7 +357,7 @@ private void assertIndexAssignedToNodeOrNot(String index, String node, boolean a /** * Tests the functionality of remote shard allocation to * ensure it can handle node drops for failover scenarios and the cluster gets back to a healthy state when - * nodes with search capabilities are added back to the cluster. + * nodes with warm capabilities are added back to the cluster. 
*/ public void testSearchableSnapshotAllocationForFailoverAndRecovery() throws Exception { final int numReplicasIndex = 1; @@ -374,27 +374,27 @@ public void testSearchableSnapshotAllocationForFailoverAndRecovery() throws Exce takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex + 1); + internalCluster().ensureAtLeastNumWarmNodes(numReplicasIndex + 1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); assertDocCount(restoredIndexName, 100L); - logger.info("--> stop a random search node"); - internalCluster().stopRandomSearchNode(); + logger.info("--> stop a random warm node"); + internalCluster().stopRandomWarmNode(); ensureYellow(restoredIndexName); assertDocCount(restoredIndexName, 100L); - logger.info("--> stop the last search node"); - internalCluster().stopRandomSearchNode(); + logger.info("--> stop the last warm node"); + internalCluster().stopRandomWarmNode(); ensureRed(restoredIndexName); - logger.info("--> add 3 new search nodes"); - internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex + 2); + logger.info("--> add 3 new warm nodes"); + internalCluster().ensureAtLeastNumWarmNodes(numReplicasIndex + 2); ensureGreen(restoredIndexName); assertDocCount(restoredIndexName, 100); - logger.info("--> stop a random search node"); - internalCluster().stopRandomSearchNode(); + logger.info("--> stop a random warm node"); + internalCluster().stopRandomWarmNode(); ensureGreen(restoredIndexName); assertDocCount(restoredIndexName, 100); } @@ -414,7 +414,7 @@ public void testSearchableSnapshotIndexIsReadOnly() throws Exception { takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - internalCluster().ensureAtLeastNumSearchNodes(1); + internalCluster().ensureAtLeastNumWarmNodes(1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); @@ -435,7 +435,7 @@ public void testDeleteSearchableSnapshotBackingIndexThrowsException() throws Exc createRepositoryWithSettings(null, repoName); createIndexWithDocsAndEnsureGreen(0, 100, indexName); takeSnapshot(client, snapshotName, repoName, indexName); - internalCluster().ensureAtLeastNumSearchNodes(1); + internalCluster().ensureAtLeastNumWarmNodes(1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertThrows( SnapshotInUseDeletionException.class, @@ -455,7 +455,7 @@ public void testDeleteSearchableSnapshotBackingIndex() throws Exception { createIndexWithDocsAndEnsureGreen(0, 100, indexName2); takeSnapshot(client, snapshotName1, repoName, indexName1); takeSnapshot(client, snapshotName2, repoName, indexName2); - internalCluster().ensureAtLeastNumSearchNodes(1); + internalCluster().ensureAtLeastNumWarmNodes(1); restoreSnapshotAndEnsureGreen(client, snapshotName2, repoName); client().admin().cluster().deleteSnapshot(new DeleteSnapshotRequest(repoName, snapshotName1)).actionGet(); } @@ -561,7 +561,7 @@ public void testUpdateIndexSettings() throws InterruptedException { takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - internalCluster().ensureAtLeastNumSearchNodes(1); + internalCluster().ensureAtLeastNumWarmNodes(1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); @@ -618,7 +618,7 @@ public void 
testFileCacheStats() throws Exception { deleteIndicesAndEnsureGreen(client, indexName1); assertAllNodesFileCacheEmpty(); - internalCluster().ensureAtLeastNumSearchNodes(numNodes); + internalCluster().ensureAtLeastNumWarmNodes(numNodes); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertNodesFileCacheNonEmpty(numNodes); } @@ -644,7 +644,7 @@ public void testFileCacheRestore() throws Exception { takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex + 1); + internalCluster().ensureAtLeastNumWarmNodes(numReplicasIndex + 1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); @@ -653,7 +653,7 @@ public void testFileCacheRestore() throws Exception { NodesStatsResponse preRestoreStats = client().admin().cluster().nodesStats(new NodesStatsRequest().all()).actionGet(); for (NodeStats nodeStats : preRestoreStats.getNodes()) { - if (nodeStats.getNode().isSearchNode()) { + if (nodeStats.getNode().isWarmNode()) { internalCluster().restartNode(nodeStats.getNode().getName()); } } @@ -664,7 +664,7 @@ public void testFileCacheRestore() throws Exception { for (String node : postRestoreStatsMap.keySet()) { NodeStats preRestoreStat = preRestoreStatsMap.get(node); NodeStats postRestoreStat = postRestoreStatsMap.get(node); - if (preRestoreStat.getNode().isSearchNode()) { + if (preRestoreStat.getNode().isWarmNode()) { assertEquals(preRestoreStat.getFileCacheStats().getUsed(), postRestoreStat.getFileCacheStats().getUsed()); } } @@ -723,7 +723,7 @@ private void assertNodesFileCacheNonEmpty(int numNodes) { int nonEmptyFileCacheNodes = 0; for (NodeStats stats : response.getNodes()) { FileCacheStats fcStats = stats.getFileCacheStats(); - if (stats.getNode().isSearchNode()) { + if (stats.getNode().isWarmNode()) { if (!isFileCacheEmpty(fcStats)) { nonEmptyFileCacheNodes++; } @@ -747,7 +747,7 @@ public void testPruneFileCacheOnIndexDeletion() throws Exception { final Client client = client(); final int numNodes = 2; - internalCluster().ensureAtLeastNumSearchAndDataNodes(numNodes); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numNodes); createIndexWithDocsAndEnsureGreen(1, 100, indexName1); createRepositoryWithSettings(null, repoName); @@ -763,7 +763,7 @@ public void testPruneFileCacheOnIndexDeletion() throws Exception { } /** - * Test scenario that checks the cache folder location on search nodes for the restored index on snapshot restoration + * Test scenario that checks the cache folder location on warm nodes for the restored index on snapshot restoration * and ensures the index folder is cleared on all nodes post index deletion */ public void testCacheIndexFilesClearedOnDelete() throws Exception { @@ -775,7 +775,7 @@ public void testCacheIndexFilesClearedOnDelete() throws Exception { final String snapshotName = "test-snap"; final Client client = client(); - internalCluster().ensureAtLeastNumSearchAndDataNodes(numShards); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numShards); createIndexWithDocsAndEnsureGreen(numReplicas, 100, indexName); createRepositoryWithSettings(null, repoName); takeSnapshot(client, snapshotName, repoName, indexName); @@ -796,7 +796,7 @@ public void testCacheIndexFilesClearedOnDelete() throws Exception { } /** - * Test scenario that validates that the default search preference for searchable snapshot + * Test scenario that validates that the default warm preference for 
searchable snapshot * is primary shards */ public void testDefaultShardPreference() throws Exception { @@ -808,7 +808,7 @@ public void testDefaultShardPreference() throws Exception { final Client client = client(); // Create an index, snapshot and restore as a searchable snapshot index - internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicas + 1); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numReplicas + 1); createIndexWithDocsAndEnsureGreen(numReplicas, 100, indexName); createRepositoryWithSettings(null, repoName); takeSnapshot(client, snapshotName, repoName, indexName); @@ -861,7 +861,7 @@ public void testRestoreSearchableSnapshotWithIndexStoreTypeThrowsException() thr takeSnapshot(client, snapshotName, repoName, indexName1); deleteIndicesAndEnsureGreen(client, indexName1); - internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex1 + 1); + internalCluster().ensureAtLeastNumWarmNodes(numReplicasIndex1 + 1); // set "index.store.type" to "remote_snapshot" in index settings of restore API and assert appropriate exception with error message // is thrown. @@ -898,8 +898,8 @@ private void assertCacheDirectoryReplicaAndIndexCount(int numCacheFolderCount, i // Get the available NodeEnvironment instances Iterable nodes = internalCluster().getInstances(Node.class); - // Filter out search NodeEnvironment(s) since FileCache is initialized only on search nodes and - // collect the path for all the cache locations on search nodes. + // Filter out warm NodeEnvironment(s) since FileCache is initialized only on warm nodes and + // collect the path for all the cache locations on warm nodes. List searchNodeFileCachePaths = StreamSupport.stream(nodes.spliterator(), false) .filter(node -> node.fileCache() != null) .map(node -> node.getNodeEnvironment().fileCacheNodePath().fileCachePath) @@ -931,12 +931,12 @@ public void testRelocateSearchableSnapshotIndex() throws Exception { takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - String searchNode1 = internalCluster().startSearchOnlyNodes(1).get(0); + String searchNode1 = internalCluster().startWarmOnlyNodes(1).get(0); internalCluster().validateClusterFormed(); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); - String searchNode2 = internalCluster().startSearchOnlyNodes(1).get(0); + String searchNode2 = internalCluster().startWarmOnlyNodes(1).get(0); internalCluster().validateClusterFormed(); final Index index = resolveIndex(restoredIndexName); @@ -995,7 +995,7 @@ public void testCreateSearchableSnapshotWithSpecifiedRemoteDataRatio() throws Ex createRepositoryWithSettings(null, repoName); takeSnapshot(client, snapshotName, repoName, indexName1, indexName2); - internalCluster().ensureAtLeastNumSearchNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); + internalCluster().ensureAtLeastNumWarmNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertDocCount(restoredIndexName1, 100L); @@ -1014,22 +1014,22 @@ public void cleanup() throws Exception { } public void testStartSearchNode() throws Exception { - // test start dedicated search node - internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.SEARCH_ROLE))); - // test start node without search role + // test start dedicated warm node + internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.WARM_ROLE))); + // test start node without warm 
role internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.DATA_ROLE))); - // test start non-dedicated search node, if the user doesn't configure the cache size, it fails + // test start non-dedicated warm node, if the user doesn't configure the cache size, it fails assertThrows( SettingsException.class, () -> internalCluster().startNode( - Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE))) + Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.WARM_ROLE, DiscoveryNodeRole.DATA_ROLE))) ) ); - // test start non-dedicated search node + // test start non-dedicated warm node assertThrows( SettingsException.class, () -> internalCluster().startNode( - Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE))) + Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.WARM_ROLE, DiscoveryNodeRole.DATA_ROLE))) ) ); } diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java index 0e0b4e9be261a..8285f361ee220 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java @@ -758,15 +758,15 @@ public BootstrapCheckResult check(BootstrapContext context) { } /** - * Bootstrap check that if a search node contains multiple data paths + * Bootstrap check that if a warm node contains multiple data paths */ static class MultipleDataPathCheck implements BootstrapCheck { @Override public BootstrapCheckResult check(BootstrapContext context) { - if (NodeRoleSettings.NODE_ROLES_SETTING.get(context.settings()).contains(DiscoveryNodeRole.SEARCH_ROLE) + if (NodeRoleSettings.NODE_ROLES_SETTING.get(context.settings()).contains(DiscoveryNodeRole.WARM_ROLE) && Environment.PATH_DATA_SETTING.get(context.settings()).size() > 1) { - return BootstrapCheckResult.failure("Multiple data paths are not allowed for search nodes"); + return BootstrapCheckResult.failure("Multiple data paths are not allowed for warm nodes"); } return BootstrapCheckResult.success(); } diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java index 4121e56fae0f5..804325dc1f565 100644 --- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java @@ -274,7 +274,7 @@ public void onResponse(NodesStatsResponse nodesStatsResponse) { nodeFileCacheStats = Collections.unmodifiableMap( nodesStatsResponse.getNodes() .stream() - .filter(nodeStats -> nodeStats.getNode().isSearchNode()) + .filter(nodeStats -> nodeStats.getNode().isWarmNode()) .collect(Collectors.toMap(nodeStats -> nodeStats.getNode().getId(), NodeStats::getFileCacheStats)) ); } diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 12cdafdcdbf1b..9869bfd3fd4e6 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -122,12 +122,12 @@ public static boolean isRemoteClusterClient(final Settings settings) { return hasRole(settings, DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE); } - public static boolean isSearchNode(Settings settings) { - return hasRole(settings, DiscoveryNodeRole.SEARCH_ROLE); 
+ public static boolean isWarmNode(Settings settings) { + return hasRole(settings, DiscoveryNodeRole.WARM_ROLE); } - public static boolean isDedicatedSearchNode(Settings settings) { - return getRolesFromSettings(settings).stream().allMatch(DiscoveryNodeRole.SEARCH_ROLE::equals); + public static boolean isDedicatedWarmNode(Settings settings) { + return getRolesFromSettings(settings).stream().allMatch(DiscoveryNodeRole.WARM_ROLE::equals); } private final String nodeName; @@ -480,12 +480,12 @@ public boolean isRemoteClusterClient() { } /** - * Returns whether the node is dedicated to provide search capability. + * Returns whether the node is dedicated to hold warm indices. * - * @return true if the node contains search role, false otherwise + * @return true if the node contains warm role, false otherwise */ - public boolean isSearchNode() { - return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); + public boolean isWarmNode() { + return roles.contains(DiscoveryNodeRole.WARM_ROLE); } /** diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 0d2b08656c38d..90aa4b6f389d1 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -298,13 +298,13 @@ public Setting legacySetting() { }; /** - * Represents the role for a search node, which is dedicated to provide search capability. + * Represents the role for a warm node, which is dedicated to hold warm indices. */ - public static final DiscoveryNodeRole SEARCH_ROLE = new DiscoveryNodeRole("search", "s", true) { + public static final DiscoveryNodeRole WARM_ROLE = new DiscoveryNodeRole("warm", "w", true) { @Override public Setting legacySetting() { - // search role is added in 2.4 so doesn't need to configure legacy setting + // warm role is added in 2.4 so doesn't need to configure legacy setting return null; } @@ -314,7 +314,7 @@ public Setting legacySetting() { * The built-in node roles. 
*/ public static SortedSet BUILT_IN_ROLES = Collections.unmodifiableSortedSet( - new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, SEARCH_ROLE)) + new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, WARM_ROLE)) ); /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java index 53788c6559ee5..82ab450788d88 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java @@ -37,7 +37,7 @@ public static RoutingPool getNodePool(RoutingNode node) { * Helps to determine the appropriate {@link RoutingPool} for a given node from the {@link DiscoveryNode} */ public static RoutingPool getNodePool(DiscoveryNode node) { - if (node.isSearchNode()) { + if (node.isWarmNode()) { return REMOTE_CAPABLE; } return LOCAL_ONLY; diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 5c6e44d063dd7..9560bfc547480 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -388,7 +388,7 @@ public NodeEnvironment(Settings settings, Environment environment, IndexStoreLis ensureNoShardData(nodePaths); } - if (DiscoveryNode.isSearchNode(settings) == false) { + if (DiscoveryNode.isWarmNode(settings) == false) { ensureNoFileCacheData(fileCacheNodePath); } @@ -1202,15 +1202,15 @@ private void ensureNoShardData(final NodePath[] nodePaths) throws IOException { } /** - * Throws an exception if cache exists on a non-search node. + * Throws an exception if cache exists on a non-warm node. */ private void ensureNoFileCacheData(final NodePath fileCacheNodePath) throws IOException { List cacheDataPaths = collectFileCacheDataPath(fileCacheNodePath); if (cacheDataPaths.isEmpty() == false) { final String message = String.format( Locale.ROOT, - "node does not have the %s role but has data within node search cache: %s. Use 'opensearch-node repurpose' tool to clean up", - DiscoveryNodeRole.SEARCH_ROLE.roleName(), + "node does not have the %s role but has data within node warm cache: %s. 
Use 'opensearch-node repurpose' tool to clean up", + DiscoveryNodeRole.WARM_ROLE.roleName(), cacheDataPaths ); throw new IllegalStateException(message); diff --git a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java index 3a8996afed34e..1d68829ececfd 100644 --- a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java @@ -69,14 +69,14 @@ public class NodeRepurposeCommand extends OpenSearchNodeCommand { static final String ABORTED_BY_USER_MSG = OpenSearchNodeCommand.ABORTED_BY_USER_MSG; static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = OpenSearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG; - static final String NO_CLEANUP = "Node has node.data=true and node.search=true -> no clean up necessary"; + static final String NO_CLEANUP = "Node has node.data=true and node.warm=true -> no clean up necessary"; static final String NO_DATA_TO_CLEAN_UP_FOUND = "No data to clean-up found"; static final String NO_SHARD_DATA_TO_CLEAN_UP_FOUND = "No shard data to clean-up found"; static final String NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND = "No file cache to clean-up found"; private static final int FILE_CACHE_NODE_PATH_LOCATION = 0; public NodeRepurposeCommand() { - super("Repurpose this node to another cluster-manager/data/search role, cleaning up any excess persisted data"); + super("Repurpose this node to another cluster-manager/data/warm role, cleaning up any excess persisted data"); } void testExecute(Terminal terminal, OptionSet options, Environment env) throws Exception { @@ -86,7 +86,7 @@ void testExecute(Terminal terminal, OptionSet options, Environment env) throws E @Override protected boolean validateBeforeLock(Terminal terminal, Environment env) { Settings settings = env.settings(); - if (DiscoveryNode.isDataNode(settings) && DiscoveryNode.isSearchNode(settings)) { + if (DiscoveryNode.isDataNode(settings) && DiscoveryNode.isWarmNode(settings)) { terminal.println(Terminal.Verbosity.NORMAL, NO_CLEANUP); return false; } @@ -97,15 +97,15 @@ protected boolean validateBeforeLock(Terminal terminal, Environment env) { @Override protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLockId, OptionSet options, Environment env) throws IOException { - assert DiscoveryNode.isDataNode(env.settings()) == false || DiscoveryNode.isSearchNode(env.settings()) == false; + assert DiscoveryNode.isDataNode(env.settings()) == false || DiscoveryNode.isWarmNode(env.settings()) == false; boolean repurposeData = DiscoveryNode.isDataNode(env.settings()) == false; - boolean repurposeSearch = DiscoveryNode.isSearchNode(env.settings()) == false; + boolean repurposeWarm = DiscoveryNode.isWarmNode(env.settings()) == false; if (DiscoveryNode.isClusterManagerNode(env.settings()) == false) { - processNoClusterManagerRepurposeNode(terminal, dataPaths, env, repurposeData, repurposeSearch); + processNoClusterManagerRepurposeNode(terminal, dataPaths, env, repurposeData, repurposeWarm); } else { - processClusterManagerRepurposeNode(terminal, dataPaths, env, repurposeData, repurposeSearch); + processClusterManagerRepurposeNode(terminal, dataPaths, env, repurposeData, repurposeWarm); } } @@ -170,13 +170,13 @@ private void processNoClusterManagerRepurposeNode( if (repurposeData && repurposeSearch) { terminal.println( - "Node is being re-purposed as no-cluster-manager, no-data and no-search. Clean-up of index data and file cache will be performed." 
+ "Node is being re-purposed as no-cluster-manager, no-data and no-warm. Clean-up of index data and file cache will be performed." ); } else if (repurposeData) { terminal.println("Node is being re-purposed as no-cluster-manager and no-data. Clean-up of index data will be performed."); } else if (repurposeSearch) { terminal.println( - "Node is being re-purposed as no-cluster-manager and no-search. Clean-up of file cache and corresponding index metadata will be performed." + "Node is being re-purposed as no-cluster-manager and no-warm. Clean-up of file cache and corresponding index metadata will be performed." ); } confirm(terminal, "Do you want to proceed?"); @@ -194,11 +194,11 @@ private void processNoClusterManagerRepurposeNode( } if (repurposeData && repurposeSearch) { - terminal.println("Node successfully repurposed to no-cluster-manager, no-data and no-search."); + terminal.println("Node successfully repurposed to no-cluster-manager, no-data and no-warm."); } else if (repurposeData) { terminal.println("Node successfully repurposed to no-cluster-manager and no-data."); } else if (repurposeSearch) { - terminal.println("Node successfully repurposed to no-cluster-manager and no-search."); + terminal.println("Node successfully repurposed to no-cluster-manager and no-warm."); } } @@ -252,12 +252,12 @@ private void processClusterManagerRepurposeNode( if (repurposeData && repurposeSearch) { terminal.println( - "Node is being re-purposed as cluster-manager, no-data and no-search. Clean-up of shard data and file cache data will be performed." + "Node is being re-purposed as cluster-manager, no-data and no-warm. Clean-up of shard data and file cache data will be performed." ); } else if (repurposeData) { terminal.println("Node is being re-purposed as cluster-manager and no-data. Clean-up of shard data will be performed."); } else if (repurposeSearch) { - terminal.println("Node is being re-purposed as cluster-manager and no-search. Clean-up of file cache data will be performed."); + terminal.println("Node is being re-purposed as cluster-manager and no-warm. 
Clean-up of file cache data will be performed."); } confirm(terminal, "Do you want to proceed?"); @@ -271,11 +271,11 @@ private void processClusterManagerRepurposeNode( } if (repurposeData && repurposeSearch) { - terminal.println("Node successfully repurposed to cluster-manager, no-data and no-search."); + terminal.println("Node successfully repurposed to cluster-manager, no-data and no-warm."); } else if (repurposeData) { terminal.println("Node successfully repurposed to cluster-manager and no-data."); } else if (repurposeSearch) { - terminal.println("Node successfully repurposed to cluster-manager and no-search."); + terminal.println("Node successfully repurposed to cluster-manager and no-warm."); } } diff --git a/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java b/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java index 2de50f4d4295d..6501ef6e588bd 100644 --- a/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java +++ b/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java @@ -56,7 +56,7 @@ public static TieringValidationResult validateHotToWarm( final DiskThresholdSettings diskThresholdSettings ) { final String indexNames = concreteIndices.stream().map(Index::getName).collect(Collectors.joining(", ")); - validateSearchNodes(currentState, indexNames); + validateWarmNodes(currentState, indexNames); validateDiskThresholdWaterMarkNotBreached(currentState, clusterInfo, diskThresholdSettings, indexNames); final TieringValidationResult tieringValidationResult = new TieringValidationResult(concreteIndices); @@ -91,18 +91,18 @@ public static TieringValidationResult validateHotToWarm( } /** - * Validates that there are eligible nodes with the search role in the current cluster state. + * Validates that there are eligible nodes with the warm role in the current cluster state. * (only for the dedicated case - to be removed later) * * @param currentState the current cluster state * @param indexNames the names of the indices being validated - * @throws IllegalArgumentException if there are no eligible search nodes in the cluster + * @throws IllegalArgumentException if there are no eligible warm nodes in the cluster */ - static void validateSearchNodes(final ClusterState currentState, final String indexNames) { + static void validateWarmNodes(final ClusterState currentState, final String indexNames) { if (getEligibleNodes(currentState).isEmpty()) { final String errorMsg = "Rejecting tiering request for indices [" + indexNames - + "] because there are no nodes found with the search role"; + + "] because there are no nodes found with the warm role"; logger.warn(errorMsg); throw new IllegalArgumentException(errorMsg); } @@ -183,7 +183,7 @@ static void validateDiskThresholdWaterMarkNotBreached( } } throw new IllegalArgumentException( - "Disk threshold low watermark is breached on all the search nodes, rejecting tiering request for indices: " + indexNames + "Disk threshold low watermark is breached on all the warm nodes, rejecting tiering request for indices: " + indexNames ); } @@ -265,13 +265,13 @@ static long getTotalAvailableBytesInWarmTier(final Map usages } /** - * Retrieves the set of eligible(search) nodes from the current cluster state. + * Retrieves the set of eligible(warm) nodes from the current cluster state. 
* * @param currentState the current cluster state * @return the set of eligible nodes */ static Set getEligibleNodes(final ClusterState currentState) { final Map nodes = currentState.getNodes().getDataNodes(); - return nodes.values().stream().filter(DiscoveryNode::isSearchNode).collect(Collectors.toSet()); + return nodes.values().stream().filter(DiscoveryNode::isWarmNode).collect(Collectors.toSet()); } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 222c6e8ba36c4..8037f90653d89 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -401,7 +401,7 @@ public class Node implements Closeable { public static final Setting NODE_SEARCH_CACHE_SIZE_SETTING = new Setting<>( "node.search.cache.size", - s -> (DiscoveryNode.isDedicatedSearchNode(s)) ? "80%" : ZERO, + s -> (DiscoveryNode.isDedicatedWarmNode(s)) ? "80%" : ZERO, Node::validateFileCacheSize, Property.NodeScope ); @@ -556,8 +556,8 @@ protected Node( .map(IndexStorePlugin::getIndexStoreListener) .filter(Optional::isPresent) .map(Optional::get); - // FileCache is only initialized on search nodes, so we only create FileCacheCleaner on search nodes as well - if (DiscoveryNode.isSearchNode(settings) == false) { + // FileCache is only initialized on warm nodes, so we only create FileCacheCleaner on warm nodes as well + if (DiscoveryNode.isWarmNode(settings) == false) { nodeEnvironment = new NodeEnvironment( settings, environment, @@ -2164,13 +2164,13 @@ DiscoveryNode getNode() { } /** - * Initializes the search cache with a defined capacity. + * Initializes the warm cache with a defined capacity. * The capacity of the cache is based on user configuration for {@link Node#NODE_SEARCH_CACHE_SIZE_SETTING}. - * If the user doesn't configure the cache size, it fails if the node is a data + search node. - * Else it configures the size to 80% of total capacity for a dedicated search node, if not explicitly defined. + * If the user doesn't configure the cache size, it fails if the node is a data + warm node. + * Else it configures the size to 80% of total capacity for a dedicated warm node, if not explicitly defined. 
*/ private void initializeFileCache(Settings settings, CircuitBreaker circuitBreaker) throws IOException { - if (DiscoveryNode.isSearchNode(settings) == false) { + if (DiscoveryNode.isWarmNode(settings) == false) { return; } @@ -2179,7 +2179,7 @@ private void initializeFileCache(Settings settings, CircuitBreaker circuitBreake if (capacityRaw.equals(ZERO)) { throw new SettingsException( "Unable to initialize the " - + DiscoveryNodeRole.SEARCH_ROLE.roleName() + + DiscoveryNodeRole.WARM_ROLE.roleName() + "-" + DiscoveryNodeRole.DATA_ROLE.roleName() + " node: Missing value for configuration " @@ -2220,7 +2220,7 @@ private static String validateFileCacheSize(String capacityRaw) { } /** - * Returns the {@link FileCache} instance for remote search node + * Returns the {@link FileCache} instance for remote warm node * Note: Visible for testing */ public FileCache fileCache() { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java index ec0c1a8ebe3b9..104168ea6e526 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java @@ -56,7 +56,7 @@ protected Collection> nodePlugins() { @Before public void setup() { internalCluster().startClusterManagerOnlyNode(); - internalCluster().ensureAtLeastNumSearchAndDataNodes(1); + internalCluster().ensureAtLeastNumWarmAndDataNodes(1); long bytes = new ByteSizeValue(1000, ByteSizeUnit.KB).getBytes(); final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); clusterInfoService.setDiskUsageFunctionAndRefresh((discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, bytes, bytes - 1)); diff --git a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java index 69102d2e76bef..b3e912ebd6e9c 100644 --- a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java @@ -786,9 +786,9 @@ public void testMultipleDataPathsForSearchNodeCheck() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> performDataPathsCheck(paths, DiscoveryNodeRole.SEARCH_ROLE.roleName()) + () -> performDataPathsCheck(paths, DiscoveryNodeRole.WARM_ROLE.roleName()) ); - assertThat(e.getMessage(), containsString("Multiple data paths are not allowed for search nodes")); + assertThat(e.getMessage(), containsString("Multiple data paths are not allowed for warm nodes")); } public void testMultipleDataPathsForDataNodeCheck() throws NodeValidationException { @@ -802,7 +802,7 @@ public void testSingleDataPathForSearchNodeCheck() throws NodeValidationExceptio Path path = PathUtils.get(createTempDir().toString()); String[] paths = new String[] { path.resolve("a").toString() }; - performDataPathsCheck(paths, DiscoveryNodeRole.SEARCH_ROLE.roleName()); + performDataPathsCheck(paths, DiscoveryNodeRole.WARM_ROLE.roleName()); } private void performDataPathsCheck(String[] paths, String roleName) throws NodeValidationException { diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 6550ed39e8042..40fcb648bea7a 100644 --- 
a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -56,9 +56,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.opensearch.test.NodeRoles.nonRemoteClusterClientNode; -import static org.opensearch.test.NodeRoles.nonSearchNode; +import static org.opensearch.test.NodeRoles.nonWarmNode; import static org.opensearch.test.NodeRoles.remoteClusterClientNode; -import static org.opensearch.test.NodeRoles.searchNode; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; @@ -235,12 +234,12 @@ public void testDiscoveryNodeIsRemoteClusterClientUnset() { runTestDiscoveryNodeIsRemoteClusterClient(nonRemoteClusterClientNode(), false); } - public void testDiscoveryNodeIsSearchSet() { - runTestDiscoveryNodeIsSearch(searchNode(), true); + public void testDiscoveryNodeIsWarmSet() { + runTestDiscoveryNodeIsWarm(NodeRoles.warmNode(), true); } - public void testDiscoveryNodeIsSearchUnset() { - runTestDiscoveryNodeIsSearch(nonSearchNode(), false); + public void testDiscoveryNodeIsWarmUnset() { + runTestDiscoveryNodeIsWarm(nonWarmNode(), false); } // Added in 2.0 temporarily, validate the MASTER_ROLE is in the list of known roles. @@ -262,13 +261,13 @@ private void runTestDiscoveryNodeIsRemoteClusterClient(final Settings settings, } } - private void runTestDiscoveryNodeIsSearch(final Settings settings, final boolean expected) { + private void runTestDiscoveryNodeIsWarm(final Settings settings, final boolean expected) { final DiscoveryNode node = DiscoveryNode.createLocal(settings, new TransportAddress(TransportAddress.META_ADDRESS, 9200), "node"); - assertThat(node.isSearchNode(), equalTo(expected)); + assertThat(node.isWarmNode(), equalTo(expected)); if (expected) { - assertThat(node.getRoles(), hasItem(DiscoveryNodeRole.SEARCH_ROLE)); + assertThat(node.getRoles(), hasItem(DiscoveryNodeRole.WARM_ROLE)); } else { - assertThat(node.getRoles(), not(hasItem(DiscoveryNodeRole.SEARCH_ROLE))); + assertThat(node.getRoles(), not(hasItem(DiscoveryNodeRole.WARM_ROLE))); } } @@ -283,9 +282,9 @@ public void testGetRoleFromRoleNameIsCaseInsensitive() { assertEquals(dynamicRoleName.toLowerCase(Locale.ROOT), dynamicNodeRole.roleNameAbbreviation()); } - public void testDiscoveryNodeIsSearchNode() { - final Settings settingWithSearchRole = NodeRoles.onlyRole(DiscoveryNodeRole.SEARCH_ROLE); - final DiscoveryNode node = DiscoveryNode.createLocal(settingWithSearchRole, buildNewFakeTransportAddress(), "node"); - assertThat(node.isSearchNode(), equalTo(true)); + public void testDiscoveryNodeIsWarmNode() { + final Settings settingWithWarmRole = NodeRoles.onlyRole(DiscoveryNodeRole.WARM_ROLE); + final DiscoveryNode node = DiscoveryNode.createLocal(settingWithWarmRole, buildNewFakeTransportAddress(), "node"); + assertThat(node.isWarmNode(), equalTo(true)); } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java index a7f18aabf8436..523a5f59a81c5 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java @@ -55,12 +55,12 @@ public abstract class 
RemoteShardsBalancerBaseTestCase extends OpenSearchAllocat DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE ); - protected static final Set SEARCH_DATA_ROLES = Set.of( + protected static final Set WARM_DATA_ROLES = Set.of( DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE, - DiscoveryNodeRole.SEARCH_ROLE + DiscoveryNodeRole.WARM_ROLE ); - protected static final Set SEARCH_ONLY_ROLE = Set.of(DiscoveryNodeRole.SEARCH_ROLE); + protected static final Set WARM_ONLY_ROLE = Set.of(DiscoveryNodeRole.WARM_ROLE); protected static final int PRIMARIES = 5; protected static final int REPLICAS = 1; @@ -146,12 +146,12 @@ public ClusterState createInitialCluster(int localOnlyNodes, int remoteNodes, bo if (remoteOnly) { for (int i = 0; i < remoteNodes; i++) { String name = getNodeId(i, true); - nb.add(newNode(name, name, SEARCH_ONLY_ROLE)); + nb.add(newNode(name, name, WARM_ONLY_ROLE)); } } else { for (int i = 0; i < remoteNodes; i++) { String name = getNodeId(i, true); - nb.add(newNode(name, name, SEARCH_DATA_ROLES)); + nb.add(newNode(name, name, WARM_DATA_ROLES)); } } DiscoveryNodes nodes = nb.build(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 94e91c3f7c3c1..2f20c7c09a42b 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -335,12 +335,12 @@ public void testDiskThresholdForRemoteShards() { .routingTable(initialRoutingTable) .build(); - Set defaultWithSearchRole = new HashSet<>(CLUSTER_MANAGER_DATA_ROLES); - defaultWithSearchRole.add(DiscoveryNodeRole.SEARCH_ROLE); + Set defaultWithWarmRole = new HashSet<>(CLUSTER_MANAGER_DATA_ROLES); + defaultWithWarmRole.add(DiscoveryNodeRole.WARM_ROLE); logger.info("--> adding two nodes"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().add(newNode("node1", defaultWithSearchRole)).add(newNode("node2", defaultWithSearchRole))) + .nodes(DiscoveryNodes.builder().add(newNode("node1", defaultWithWarmRole)).add(newNode("node2", defaultWithWarmRole))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); logShardStates(clusterState); @@ -401,8 +401,8 @@ public void testFileCacheRemoteShardsDecisions() { final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes, fileCacheStatsMap); - Set defaultWithSearchRole = new HashSet<>(CLUSTER_MANAGER_DATA_ROLES); - defaultWithSearchRole.add(DiscoveryNodeRole.SEARCH_ROLE); + Set defaultWithWarmRole = new HashSet<>(CLUSTER_MANAGER_DATA_ROLES); + defaultWithWarmRole.add(DiscoveryNodeRole.WARM_ROLE); DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings); Metadata metadata = Metadata.builder() @@ -415,14 +415,14 @@ public void testFileCacheRemoteShardsDecisions() { "node1", buildNewFakeTransportAddress(), emptyMap(), - defaultWithSearchRole, + defaultWithWarmRole, Version.CURRENT ); DiscoveryNode discoveryNode2 = new DiscoveryNode( "node2", buildNewFakeTransportAddress(), emptyMap(), - defaultWithSearchRole, + defaultWithWarmRole, Version.CURRENT ); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build(); diff --git 
a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java index 052c7877404a8..9e25e86ec0797 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java @@ -175,7 +175,7 @@ public void testTargetPoolDedicatedSearchNodeAllocationDecisions() { assertEquals(Decision.NO.type(), deciders.canAllocate(remoteShard, localOnlyNode, globalAllocation).type()); assertEquals(Decision.NO.type(), deciders.canAllocate(remoteIdx, localOnlyNode, globalAllocation).type()); assertEquals(Decision.NO.type(), deciders.canForceAllocatePrimary(unassignedRemoteShard, localOnlyNode, globalAllocation).type()); - // A dedicated search node should not accept local shards and indices. + // A dedicated warm node should not accept local shards and indices. assertEquals(Decision.NO.type(), deciders.canAllocate(localShard, remoteCapableNode, globalAllocation).type()); assertEquals(Decision.NO.type(), deciders.canAllocate(localIdx, remoteCapableNode, globalAllocation).type()); assertEquals( diff --git a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java index 190ad3283dcfc..36213d07a3681 100644 --- a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java +++ b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java @@ -598,8 +598,8 @@ public void testSearchableSnapshotPreference() { .nodes( DiscoveryNodes.builder() .add(newNode("node1", Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))) - .add(newNode("node2", Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE))) - .add(newNode("node3", Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE))) + .add(newNode("node2", Collections.singleton(DiscoveryNodeRole.WARM_ROLE))) + .add(newNode("node3", Collections.singleton(DiscoveryNodeRole.WARM_ROLE))) .localNodeId("node1") ) .build(); diff --git a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java index d2d6fdc387dfe..ab9e0c66bd8d3 100644 --- a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java +++ b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java @@ -81,29 +81,29 @@ public class NodeRepurposeCommandTests extends OpenSearchTestCase { private static final Index INDEX = new Index("testIndex", "testUUID"); private Settings dataClusterManagerSettings; - private Settings dataSearchClusterManagerSettings; + private Settings dataWarmClusterManagerSettings; private Environment environment; private Path[] nodePaths; - private Settings dataSearchNoClusterManagerSettings; + private Settings dataWarmNoClusterManagerSettings; private Settings noDataNoClusterManagerSettings; private Settings noDataClusterManagerSettings; - private Settings searchNoDataNoClusterManagerSettings; - private Settings noSearchNoClusterManagerSettings; + private Settings warmNoDataNoClusterManagerSettings; + private Settings noWarmNoClusterManagerSettings; @Before public void createNodePaths() throws IOException { dataClusterManagerSettings = buildEnvSettings(Settings.EMPTY); - Settings defaultSearchSettings = Settings.builder() + 
Settings defaultWarmSettings = Settings.builder() .put(dataClusterManagerSettings) .put(NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), new ByteSizeValue(16, ByteSizeUnit.GB).toString()) .build(); - searchNoDataNoClusterManagerSettings = onlyRole(dataClusterManagerSettings, DiscoveryNodeRole.SEARCH_ROLE); - dataSearchClusterManagerSettings = addRoles(defaultSearchSettings, Set.of(DiscoveryNodeRole.SEARCH_ROLE)); + warmNoDataNoClusterManagerSettings = onlyRole(dataClusterManagerSettings, DiscoveryNodeRole.WARM_ROLE); + dataWarmClusterManagerSettings = addRoles(defaultWarmSettings, Set.of(DiscoveryNodeRole.WARM_ROLE)); noDataClusterManagerSettings = clusterManagerNode(nonDataNode(dataClusterManagerSettings)); - dataSearchNoClusterManagerSettings = nonClusterManagerNode(dataSearchClusterManagerSettings); - noSearchNoClusterManagerSettings = nonClusterManagerNode(defaultSearchSettings); + dataWarmNoClusterManagerSettings = nonClusterManagerNode(dataWarmClusterManagerSettings); + noWarmNoClusterManagerSettings = nonClusterManagerNode(defaultWarmSettings); noDataNoClusterManagerSettings = removeRoles( dataClusterManagerSettings, @@ -132,8 +132,8 @@ public void createNodePaths() throws IOException { public void testEarlyExitNoCleanup() throws Exception { createIndexDataFiles(dataClusterManagerSettings, randomInt(10), randomBoolean()); - verifyNoQuestions(dataSearchClusterManagerSettings, containsString(NO_CLEANUP)); - verifyNoQuestions(dataSearchNoClusterManagerSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataWarmClusterManagerSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataWarmNoClusterManagerSettings, containsString(NO_CLEANUP)); } public void testNothingToCleanup() throws Exception { @@ -156,7 +156,7 @@ public void testNothingToCleanup() throws Exception { verifyNoQuestions(noDataNoClusterManagerSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); - verifyNoQuestions(noSearchNoClusterManagerSettings, containsString(NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noWarmNoClusterManagerSettings, containsString(NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND)); createIndexDataFiles(dataClusterManagerSettings, 0, randomBoolean()); @@ -227,11 +227,11 @@ public void testCleanupShardData() throws Exception { new NodeEnvironment(noDataClusterManagerSettings, environment).close(); } - public void testCleanupSearchNode() throws Exception { + public void testCleanupWarmNode() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); - createIndexDataFiles(searchNoDataNoClusterManagerSettings, shardCount, hasClusterState, true); + createIndexDataFiles(warmNoDataNoClusterManagerSettings, shardCount, hasClusterState, true); Matcher matcher = allOf( containsString(NodeRepurposeCommand.shardMessage(shardCount, 1)), @@ -251,11 +251,11 @@ public void testCleanupSearchNode() throws Exception { new NodeEnvironment(dataClusterManagerSettings, environment).close(); } - public void testCleanupSearchClusterManager() throws Exception { + public void testCleanupWarmClusterManager() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); - createIndexDataFiles(dataSearchClusterManagerSettings, shardCount, hasClusterState, true); + createIndexDataFiles(dataWarmClusterManagerSettings, shardCount, hasClusterState, true); String messageText = 
NodeRepurposeCommand.noClusterManagerMessage(1, shardCount, 0); @@ -266,23 +266,23 @@ public void testCleanupSearchClusterManager() throws Exception { conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState) ); - verifyUnchangedOnAbort(noSearchNoClusterManagerSettings, matcher, verbose); + verifyUnchangedOnAbort(noWarmNoClusterManagerSettings, matcher, verbose); // verify test setup - expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noSearchNoClusterManagerSettings, environment).close()); + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noWarmNoClusterManagerSettings, environment).close()); - verifySuccess(noSearchNoClusterManagerSettings, matcher, verbose); + verifySuccess(noWarmNoClusterManagerSettings, matcher, verbose); // verify clean. - new NodeEnvironment(noSearchNoClusterManagerSettings, environment).close(); + new NodeEnvironment(noWarmNoClusterManagerSettings, environment).close(); } public void testCleanupAll() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); - createIndexDataFiles(dataSearchClusterManagerSettings, shardCount, hasClusterState, false); - createIndexDataFiles(dataSearchClusterManagerSettings, shardCount, hasClusterState, true); + createIndexDataFiles(dataWarmClusterManagerSettings, shardCount, hasClusterState, false); + createIndexDataFiles(dataWarmClusterManagerSettings, shardCount, hasClusterState, true); // environment.dataFiles().length * shardCount will account for the local shard files // + shardCount will account for the additional file cache shard files. diff --git a/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java b/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java index 6b6f74353812b..a20d4debb3959 100644 --- a/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java +++ b/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java @@ -46,27 +46,27 @@ import static org.opensearch.indices.tiering.TieringRequestValidator.validateIndexHealth; import static org.opensearch.indices.tiering.TieringRequestValidator.validateOpenIndex; import static org.opensearch.indices.tiering.TieringRequestValidator.validateRemoteStoreIndex; -import static org.opensearch.indices.tiering.TieringRequestValidator.validateSearchNodes; +import static org.opensearch.indices.tiering.TieringRequestValidator.validateWarmNodes; public class TieringRequestValidatorTests extends OpenSearchTestCase { - public void testValidateSearchNodes() { - ClusterState clusterStateWithSearchNodes = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + public void testValidateWarmNodes() { + ClusterState clusterStateWithWarmNodes = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(createNodes(2, 0, 0)) .build(); // throws no errors - validateSearchNodes(clusterStateWithSearchNodes, "test_index"); + validateWarmNodes(clusterStateWithWarmNodes, "test_index"); } - public void testWithNoSearchNodesInCluster() { - ClusterState clusterStateWithNoSearchNodes = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + public void testWithNoWarmNodesInCluster() { + ClusterState clusterStateWithNoWarmNodes = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(createNodes(0, 1, 1)) .build(); 
// throws error IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> validateSearchNodes(clusterStateWithNoSearchNodes, "test") + () -> validateWarmNodes(clusterStateWithNoWarmNodes, "test") ); } @@ -212,7 +212,7 @@ public void testValidateEligibleNodesCapacityWithAllRejected() { public void testGetTotalAvailableBytesInWarmTier() { Map diskUsages = diskUsages(2, 500, 100); - assertEquals(200, getTotalAvailableBytesInWarmTier(diskUsages, Set.of("node-s0", "node-s1"))); + assertEquals(200, getTotalAvailableBytesInWarmTier(diskUsages, Set.of("node-w0", "node-w1"))); } public void testEligibleNodes() { @@ -254,15 +254,15 @@ private static Settings createDefaultIndexSettings(String indexUuid) { .build(); } - private DiscoveryNodes createNodes(int numOfSearchNodes, int numOfDataNodes, int numOfIngestNodes) { + private DiscoveryNodes createNodes(int numOfWarmNodes, int numOfDataNodes, int numOfIngestNodes) { DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(); - for (int i = 0; i < numOfSearchNodes; i++) { + for (int i = 0; i < numOfWarmNodes; i++) { discoveryNodesBuilder.add( new DiscoveryNode( - "node-s" + i, + "node-w" + i, buildNewFakeTransportAddress(), Collections.emptyMap(), - Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE), + Collections.singleton(DiscoveryNodeRole.WARM_ROLE), Version.CURRENT ) ); @@ -308,10 +308,10 @@ private static ClusterInfo clusterInfo(int noOfNodes, long totalBytes, long free return new ClusterInfo(diskUsages, null, null, null, Map.of(), Map.of()); } - private static Map diskUsages(int noOfSearchNodes, long totalBytes, long freeBytes) { + private static Map diskUsages(int noOfWarmNodes, long totalBytes, long freeBytes) { final Map diskUsages = new HashMap<>(); - for (int i = 0; i < noOfSearchNodes; i++) { - diskUsages.put("node-s" + i, new DiskUsage("node-s" + i, "node-s" + i, "/foo/bar", totalBytes, freeBytes)); + for (int i = 0; i < noOfWarmNodes; i++) { + diskUsages.put("node-w" + i, new DiskUsage("node-w" + i, "node-w" + i, "/foo/bar", totalBytes, freeBytes)); } return diskUsages; } diff --git a/server/src/test/java/org/opensearch/node/NodeTests.java b/server/src/test/java/org/opensearch/node/NodeTests.java index 489abd4bbca04..2f769dbd51b0a 100644 --- a/server/src/test/java/org/opensearch/node/NodeTests.java +++ b/server/src/test/java/org/opensearch/node/NodeTests.java @@ -376,17 +376,17 @@ public void testCreateWithCircuitBreakerPlugins() throws IOException { } public void testCreateWithFileCache() throws Exception { - Settings searchRoleSettings = addRoles(baseSettings().build(), Set.of(DiscoveryNodeRole.SEARCH_ROLE)); + Settings warmRoleSettings = addRoles(baseSettings().build(), Set.of(DiscoveryNodeRole.WARM_ROLE)); List> plugins = basePlugins(); ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); - Settings searchRoleSettingsWithConfig = baseSettings().put(searchRoleSettings) + Settings warmRoleSettingsWithConfig = baseSettings().put(warmRoleSettings) .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) .build(); - Settings onlySearchRoleSettings = Settings.builder() - .put(searchRoleSettingsWithConfig) + Settings onlyWarmRoleSettings = Settings.builder() + .put(warmRoleSettingsWithConfig) .put( NodeRoles.removeRoles( - searchRoleSettingsWithConfig, + warmRoleSettingsWithConfig, Set.of( DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, @@ -398,22 +398,22 @@ public void testCreateWithFileCache() throws Exception { .build(); // Test exception thrown 
with configuration missing - assertThrows(SettingsException.class, () -> new MockNode(searchRoleSettings, plugins)); + assertThrows(SettingsException.class, () -> new MockNode(warmRoleSettings, plugins)); // Test file cache is initialized - try (MockNode mockNode = new MockNode(searchRoleSettingsWithConfig, plugins)) { + try (MockNode mockNode = new MockNode(warmRoleSettingsWithConfig, plugins)) { NodeEnvironment.NodePath fileCacheNodePath = mockNode.getNodeEnvironment().fileCacheNodePath(); assertEquals(cacheSize.getBytes(), fileCacheNodePath.fileCacheReservedSize.getBytes()); } - // Test data + search node with defined cache size - try (MockNode mockNode = new MockNode(searchRoleSettingsWithConfig, plugins)) { + // Test data + warm node with defined cache size + try (MockNode mockNode = new MockNode(warmRoleSettingsWithConfig, plugins)) { NodeEnvironment.NodePath fileCacheNodePath = mockNode.getNodeEnvironment().fileCacheNodePath(); assertEquals(cacheSize.getBytes(), fileCacheNodePath.fileCacheReservedSize.getBytes()); } - // Test dedicated search node with no configuration - try (MockNode mockNode = new MockNode(onlySearchRoleSettings, plugins)) { + // Test dedicated warm node with no configuration + try (MockNode mockNode = new MockNode(onlyWarmRoleSettings, plugins)) { NodeEnvironment.NodePath fileCacheNodePath = mockNode.getNodeEnvironment().fileCacheNodePath(); assertTrue(fileCacheNodePath.fileCacheReservedSize.getBytes() > 0); FsProbe fsProbe = new FsProbe(mockNode.getNodeEnvironment(), mockNode.fileCache()); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4f0600588daef..580b507292ea8 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -1654,8 +1654,8 @@ private void setupTestCluster(int clusterManagerNodes, int dataNodes) { startCluster(); } - private void setupTestCluster(int clusterManagerNodes, int dataNodes, int searchNodes) { - testClusterNodes = new TestClusterNodes(clusterManagerNodes, dataNodes, searchNodes); + private void setupTestCluster(int clusterManagerNodes, int dataNodes, int warmNodes) { + testClusterNodes = new TestClusterNodes(clusterManagerNodes, dataNodes, warmNodes); startCluster(); } @@ -1735,7 +1735,7 @@ private final class TestClusterNodes { this(clusterManagerNodes, dataNodes, 0); } - TestClusterNodes(int clusterManagerNodes, int dataNodes, int searchNodes) { + TestClusterNodes(int clusterManagerNodes, int dataNodes, int warmNodes) { for (int i = 0; i < clusterManagerNodes; ++i) { nodes.computeIfAbsent("node" + i, nodeName -> { try { @@ -1754,10 +1754,10 @@ private final class TestClusterNodes { } }); } - for (int i = 0; i < searchNodes; ++i) { - nodes.computeIfAbsent("search-node" + i, nodeName -> { + for (int i = 0; i < warmNodes; ++i) { + nodes.computeIfAbsent("warm-node" + i, nodeName -> { try { - return newSearchNode(nodeName); + return newWarmNode(nodeName); } catch (IOException e) { throw new AssertionError(e); } @@ -1781,8 +1781,8 @@ private TestClusterNode newDataNode(String nodeName) throws IOException { return newNode(nodeName, DiscoveryNodeRole.DATA_ROLE); } - private TestClusterNode newSearchNode(String nodeName) throws IOException { - return newNode(nodeName, DiscoveryNodeRole.SEARCH_ROLE); + private TestClusterNode newWarmNode(String nodeName) throws IOException { + return newNode(nodeName, 
DiscoveryNodeRole.WARM_ROLE); } private TestClusterNode newNode(String nodeName, DiscoveryNodeRole role) throws IOException { diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index f9a09c088095b..ed6f1404e4fd2 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -201,14 +201,14 @@ public final class InternalTestCluster extends TestCluster { nodeAndClient.node.settings() ); - private static final Predicate SEARCH_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.hasRole( + private static final Predicate WARM_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.hasRole( nodeAndClient.node.settings(), - DiscoveryNodeRole.SEARCH_ROLE + DiscoveryNodeRole.WARM_ROLE ); - private static final Predicate SEARCH_AND_DATA_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.hasRole( + private static final Predicate WARM_AND_DATA_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.hasRole( nodeAndClient.node.settings(), - DiscoveryNodeRole.SEARCH_ROLE + DiscoveryNodeRole.WARM_ROLE ) && DiscoveryNode.isDataNode(nodeAndClient.node.settings()); private static final Predicate NO_DATA_NO_CLUSTER_MANAGER_PREDICATE = nodeAndClient -> DiscoveryNode @@ -219,8 +219,8 @@ public final class InternalTestCluster extends TestCluster { nodeAndClient.node.settings() ); - private static final String DEFAULT_SEARCH_CACHE_SIZE_BYTES = "2gb"; - private static final String DEFAULT_SEARCH_CACHE_SIZE_PERCENT = "5%"; + private static final String DEFAULT_WARM_CACHE_SIZE_BYTES = "2gb"; + private static final String DEFAULT_WARM_CACHE_SIZE_PERCENT = "5%"; public static final int DEFAULT_LOW_NUM_CLUSTER_MANAGER_NODES = 1; public static final int DEFAULT_HIGH_NUM_CLUSTER_MANAGER_NODES = 3; @@ -673,36 +673,36 @@ public synchronized void ensureAtLeastNumDataNodes(int n) { } /** - * Ensures that at least n search nodes are present in the cluster. + * Ensures that at least n warm nodes are present in the cluster. * if more nodes than n are present this method will not * stop any of the running nodes. */ - public synchronized void ensureAtLeastNumSearchNodes(int n) { - int size = numSearchNodes(); + public synchronized void ensureAtLeastNumWarmNodes(int n) { + int size = numWarmNodes(); if (size < n) { logger.info("increasing cluster size from {} to {}", size, n); - startNodes(n - size, Settings.builder().put(onlyRole(Settings.EMPTY, DiscoveryNodeRole.SEARCH_ROLE)).build()); + startNodes(n - size, Settings.builder().put(onlyRole(Settings.EMPTY, DiscoveryNodeRole.WARM_ROLE)).build()); validateClusterFormed(); } } /** - * Ensures that at least n data-search nodes are present in the cluster. + * Ensures that at least n data-warm nodes are present in the cluster. * if more nodes than n are present this method will not * stop any of the running nodes. 
*/ - public synchronized void ensureAtLeastNumSearchAndDataNodes(int n) { - int size = numSearchAndDataNodes(); + public synchronized void ensureAtLeastNumWarmAndDataNodes(int n) { + int size = numWarmAndDataNodes(); if (size < n) { logger.info("increasing cluster size from {} to {}", size, n); - Set searchAndDataRoles = Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.SEARCH_ROLE); + Set warmAndDataRoles = Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.WARM_ROLE); Settings settings = Settings.builder() .put( Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), - randomBoolean() ? DEFAULT_SEARCH_CACHE_SIZE_PERCENT : DEFAULT_SEARCH_CACHE_SIZE_BYTES + randomBoolean() ? DEFAULT_WARM_CACHE_SIZE_PERCENT : DEFAULT_WARM_CACHE_SIZE_BYTES ) .build(); - startNodes(n - size, Settings.builder().put(onlyRoles(settings, searchAndDataRoles)).build()); + startNodes(n - size, Settings.builder().put(onlyRoles(settings, warmAndDataRoles)).build()); validateClusterFormed(); } } @@ -1697,11 +1697,11 @@ public InetSocketAddress[] httpAddresses() { } /** - * Stops a random search node in the cluster. Returns true if a node was found to stop, false otherwise. + * Stops a random warm node in the cluster. Returns true if a node was found to stop, false otherwise. */ - public synchronized boolean stopRandomSearchNode() throws IOException { + public synchronized boolean stopRandomWarmNode() throws IOException { ensureOpen(); - NodeAndClient nodeAndClient = getRandomNodeAndClient(SEARCH_NODE_PREDICATE); + NodeAndClient nodeAndClient = getRandomNodeAndClient(WARM_NODE_PREDICATE); if (nodeAndClient != null) { logger.info("Closing random node [{}] ", nodeAndClient.name); stopNodesAndClient(nodeAndClient); @@ -2307,15 +2307,15 @@ public List startClusterManagerOnlyNodes(int numNodes, Settings settings return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build()); } - public List startDataAndSearchNodes(int numNodes) { - return startDataAndSearchNodes(numNodes, Settings.EMPTY); + public List startDataAndWarmNodes(int numNodes) { + return startDataAndWarmNodes(numNodes, Settings.EMPTY); } - public List startDataAndSearchNodes(int numNodes, Settings settings) { - Set searchAndDataRoles = new HashSet<>(); - searchAndDataRoles.add(DiscoveryNodeRole.DATA_ROLE); - searchAndDataRoles.add(DiscoveryNodeRole.SEARCH_ROLE); - return startNodes(numNodes, Settings.builder().put(onlyRoles(settings, searchAndDataRoles)).build()); + public List startDataAndWarmNodes(int numNodes, Settings settings) { + Set warmAndDataRoles = new HashSet<>(); + warmAndDataRoles.add(DiscoveryNodeRole.DATA_ROLE); + warmAndDataRoles.add(DiscoveryNodeRole.WARM_ROLE); + return startNodes(numNodes, Settings.builder().put(onlyRoles(settings, warmAndDataRoles)).build()); } public List startDataOnlyNodes(int numNodes) { @@ -2330,12 +2330,12 @@ public List startDataOnlyNodes(int numNodes, Settings settings, Boolean return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build(), ignoreNodeJoin); } - public List startSearchOnlyNodes(int numNodes) { - return startSearchOnlyNodes(numNodes, Settings.EMPTY); + public List startWarmOnlyNodes(int numNodes) { + return startWarmOnlyNodes(numNodes, Settings.EMPTY); } - public List startSearchOnlyNodes(int numNodes, Settings settings) { - return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.SEARCH_ROLE)).build()); + public List startWarmOnlyNodes(int numNodes, Settings settings) { + 
return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.WARM_ROLE)).build()); } /** calculates a min cluster-manager nodes value based on the given number of cluster-manager nodes */ @@ -2382,12 +2382,12 @@ public int numDataNodes() { return dataNodeAndClients().size(); } - public int numSearchNodes() { - return searchNodeAndClients().size(); + public int numWarmNodes() { + return warmNodeAndClients().size(); } - public int numSearchAndDataNodes() { - return searchDataNodeAndClients().size(); + public int numWarmAndDataNodes() { + return warmDataNodeAndClients().size(); } @Override @@ -2443,12 +2443,12 @@ private Collection dataNodeAndClients() { return filterNodes(nodes, DATA_NODE_PREDICATE); } - private Collection searchNodeAndClients() { - return filterNodes(nodes, SEARCH_NODE_PREDICATE); + private Collection warmNodeAndClients() { + return filterNodes(nodes, WARM_NODE_PREDICATE); } - private Collection searchDataNodeAndClients() { - return filterNodes(nodes, SEARCH_AND_DATA_NODE_PREDICATE); + private Collection warmDataNodeAndClients() { + return filterNodes(nodes, WARM_AND_DATA_NODE_PREDICATE); } private static Collection filterNodes( diff --git a/test/framework/src/main/java/org/opensearch/test/NodeRoles.java b/test/framework/src/main/java/org/opensearch/test/NodeRoles.java index a3b4431c5aeb8..9c944e4aee62e 100644 --- a/test/framework/src/main/java/org/opensearch/test/NodeRoles.java +++ b/test/framework/src/main/java/org/opensearch/test/NodeRoles.java @@ -208,20 +208,20 @@ public static Settings nonRemoteClusterClientNode(final Settings settings) { return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); } - public static Settings searchNode() { - return searchNode(Settings.EMPTY); + public static Settings warmNode() { + return warmNode(Settings.EMPTY); } - public static Settings searchNode(final Settings settings) { - return addRoles(settings, Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE)); + public static Settings warmNode(final Settings settings) { + return addRoles(settings, Collections.singleton(DiscoveryNodeRole.WARM_ROLE)); } - public static Settings nonSearchNode() { - return nonSearchNode(Settings.EMPTY); + public static Settings nonWarmNode() { + return nonWarmNode(Settings.EMPTY); } - public static Settings nonSearchNode(final Settings settings) { - return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE)); + public static Settings nonWarmNode(final Settings settings) { + return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.WARM_ROLE)); } } From e6ffc62a6bc01f504d13fcf924a1061f57148b9e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 11:20:46 -0400 Subject: [PATCH 080/550] Bump ch.qos.logback:logback-core from 1.5.16 to 1.5.17 in /test/fixtures/hdfs-fixture (#17609) * Bump ch.qos.logback:logback-core in /test/fixtures/hdfs-fixture Bumps [ch.qos.logback:logback-core](https://github.com/qos-ch/logback) from 1.5.16 to 1.5.17. - [Release notes](https://github.com/qos-ch/logback/releases) - [Commits](https://github.com/qos-ch/logback/compare/v_1.5.16...v_1.5.17) --- updated-dependencies: - dependency-name: ch.qos.logback:logback-core dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: gaobinlong Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: gaobinlong --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 33154c97ff5b7..e0634e932079b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added - Renaming the node role search to warm ([#17573](https://github.com/opensearch-project/OpenSearch/pull/17573)) ### Dependencies +- Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) ### Changed diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 6018087038ca3..52291d360bd14 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -74,7 +74,7 @@ dependencies { api 'org.apache.zookeeper:zookeeper:3.9.3' api "org.apache.commons:commons-text:1.13.0" api "commons-net:commons-net:3.11.1" - api "ch.qos.logback:logback-core:1.5.16" + api "ch.qos.logback:logback-core:1.5.17" api "ch.qos.logback:logback-classic:1.5.17" api "org.jboss.xnio:xnio-nio:3.8.16.Final" api 'org.jline:jline:3.29.0' From 36d798cc42c473bdb49f4bf39093dd3dd9191b70 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Wed, 19 Mar 2025 00:47:24 +0530 Subject: [PATCH 081/550] Change priority for scheduling reroute during timeout (#16445) * Change priority for scheduling reroute in timeout Signed-off-by: Rishab Nahata * Add setting for ESA Signed-off-by: Rishab Nahata * Fix tests Signed-off-by: Rishab Nahata * Trigger Build Signed-off-by: Rishab Nahata * Trigger Build Signed-off-by: Rishab Nahata * Add test Signed-off-by: Rishab Nahata * Trigger Build Signed-off-by: Rishab Nahata * Add changelog Signed-off-by: Rishab Nahata * Trigger Build Signed-off-by: Rishab Nahata * Trigger Build Signed-off-by: Rishab Nahata * Trigger Build Signed-off-by: Rishab Nahata --------- Signed-off-by: Rishab Nahata --- CHANGELOG.md | 2 + .../allocator/BalancedShardsAllocator.java | 42 +++++++- .../common/settings/ClusterSettings.java | 2 + .../gateway/ShardsBatchGatewayAllocator.java | 42 +++++++- ...TimeBoundBalancedShardsAllocatorTests.java | 88 +++++++++++++++-- .../gateway/GatewayAllocatorTests.java | 97 +++++++++++++++++-- 6 files changed, 249 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e0634e932079b..0d30e975aa497 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Change priority for scheduling reroute during timeout([#16445](https://github.com/opensearch-project/OpenSearch/pull/16445)) - Renaming the node role search to warm ([#17573](https://github.com/opensearch-project/OpenSearch/pull/17573)) + ### Dependencies - Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index cfbb4d34c3a38..bd5b694f4fe41 100644 --- 
a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -62,9 +62,11 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.Locale; import java.util.Map; import java.util.Set; +import static org.opensearch.cluster.action.shard.ShardStateAction.FOLLOW_UP_REROUTE_PRIORITY_SETTING; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; @@ -199,6 +201,32 @@ public class BalancedShardsAllocator implements ShardsAllocator { Setting.Property.Dynamic ); + /** + * Adjusts the priority of the followup reroute task when current round times out. NORMAL is right for reasonable clusters, + * but for a cluster in a messed up state which is starving NORMAL priority tasks, it might be necessary to raise this higher + * to allocate shards. + */ + public static final Setting FOLLOW_UP_REROUTE_PRIORITY_SETTING = new Setting<>( + "cluster.routing.allocation.balanced_shards_allocator.schedule_reroute.priority", + Priority.NORMAL.toString(), + BalancedShardsAllocator::parseReroutePriority, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private static Priority parseReroutePriority(String priorityString) { + final Priority priority = Priority.valueOf(priorityString.toUpperCase(Locale.ROOT)); + switch (priority) { + case NORMAL: + case HIGH: + case URGENT: + return priority; + } + throw new IllegalArgumentException( + "priority [" + priority + "] not supported for [" + FOLLOW_UP_REROUTE_PRIORITY_SETTING.getKey() + "]" + ); + } + private volatile boolean movePrimaryFirst; private volatile ShardMovementStrategy shardMovementStrategy; @@ -213,6 +241,7 @@ public class BalancedShardsAllocator implements ShardsAllocator { private volatile boolean ignoreThrottleInRestore; private volatile TimeValue allocatorTimeout; + private volatile Priority followUpRerouteTaskPriority; private long startTime; private RerouteService rerouteService; @@ -233,6 +262,7 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting setPreferPrimaryShardRebalance(PREFER_PRIMARY_SHARD_REBALANCE.get(settings)); setShardMovementStrategy(SHARD_MOVEMENT_STRATEGY_SETTING.get(settings)); setAllocatorTimeout(ALLOCATOR_TIMEOUT_SETTING.get(settings)); + setFollowUpRerouteTaskPriority(FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(settings)); clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_BALANCE, this::setPreferPrimaryShardBalance); clusterSettings.addSettingsUpdateConsumer(SHARD_MOVE_PRIMARY_FIRST_SETTING, this::setMovePrimaryFirst); clusterSettings.addSettingsUpdateConsumer(SHARD_MOVEMENT_STRATEGY_SETTING, this::setShardMovementStrategy); @@ -244,6 +274,7 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting clusterSettings.addSettingsUpdateConsumer(PRIMARY_CONSTRAINT_THRESHOLD_SETTING, this::setPrimaryConstraintThresholdSetting); clusterSettings.addSettingsUpdateConsumer(IGNORE_THROTTLE_FOR_REMOTE_RESTORE, this::setIgnoreThrottleInRestore); clusterSettings.addSettingsUpdateConsumer(ALLOCATOR_TIMEOUT_SETTING, this::setAllocatorTimeout); + 
clusterSettings.addSettingsUpdateConsumer(FOLLOW_UP_REROUTE_PRIORITY_SETTING, this::setFollowUpRerouteTaskPriority); } @Override @@ -342,6 +373,10 @@ private void setAllocatorTimeout(TimeValue allocatorTimeout) { this.allocatorTimeout = allocatorTimeout; } + private void setFollowUpRerouteTaskPriority(Priority followUpRerouteTaskPriority) { + this.followUpRerouteTaskPriority = followUpRerouteTaskPriority; + } + protected boolean allocatorTimedOut() { if (allocatorTimeout.equals(TimeValue.MINUS_ONE)) { if (logger.isTraceEnabled()) { @@ -438,10 +473,13 @@ private void failAllocationOfNewPrimaries(RoutingAllocation allocation) { private void scheduleRerouteIfAllocatorTimedOut() { if (allocatorTimedOut()) { - assert rerouteService != null : "RerouteService not set to schedule reroute after allocator time out"; + if (rerouteService == null) { + logger.info("RerouteService not set to schedule reroute after allocator time out"); + return; + } rerouteService.reroute( "reroute after balanced shards allocator timed out", - Priority.HIGH, + followUpRerouteTaskPriority, ActionListener.wrap( r -> logger.trace("reroute after balanced shards allocator timed out completed"), e -> logger.debug("reroute after balanced shards allocator timed out failed", e) diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index c1ce90aaa8efa..d45d72e0817e0 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -277,6 +277,7 @@ public void apply(Settings value, Settings current, Settings previous) { BalancedShardsAllocator.THRESHOLD_SETTING, BalancedShardsAllocator.IGNORE_THROTTLE_FOR_REMOTE_RESTORE, BalancedShardsAllocator.ALLOCATOR_TIMEOUT_SETTING, + BalancedShardsAllocator.FOLLOW_UP_REROUTE_PRIORITY_SETTING, BalancedShardsAllocator.PRIMARY_CONSTRAINT_THRESHOLD_SETTING, BreakerSettings.CIRCUIT_BREAKER_LIMIT_SETTING, BreakerSettings.CIRCUIT_BREAKER_OVERHEAD_SETTING, @@ -351,6 +352,7 @@ public void apply(Settings value, Settings current, Settings previous) { ShardsBatchGatewayAllocator.GATEWAY_ALLOCATOR_BATCH_SIZE, ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING, ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING, + ShardsBatchGatewayAllocator.FOLLOW_UP_REROUTE_PRIORITY_SETTING, PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD, NetworkModule.HTTP_DEFAULT_TYPE_SETTING, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING, diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index 9c38ea1df8a41..82229f244239f 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -53,6 +53,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -82,6 +83,7 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { private TimeValue primaryShardsBatchGatewayAllocatorTimeout; private TimeValue replicaShardsBatchGatewayAllocatorTimeout; + private volatile Priority followUpRerouteTaskPriority; public static final TimeValue MIN_ALLOCATOR_TIMEOUT = TimeValue.timeValueSeconds(20); private final ClusterManagerMetrics 
clusterManagerMetrics; @@ -145,6 +147,32 @@ public void validate(TimeValue timeValue) { Setting.Property.Dynamic ); + /** + * Adjusts the priority of the followup reroute task when current round times out. NORMAL is right for reasonable clusters, + * but for a cluster in a messed up state which is starving NORMAL priority tasks, it might be necessary to raise this higher + * to allocate existing shards. + */ + public static final Setting FOLLOW_UP_REROUTE_PRIORITY_SETTING = new Setting<>( + "cluster.routing.allocation.shards_batch_gateway_allocator.schedule_reroute.priority", + Priority.NORMAL.toString(), + ShardsBatchGatewayAllocator::parseReroutePriority, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private static Priority parseReroutePriority(String priorityString) { + final Priority priority = Priority.valueOf(priorityString.toUpperCase(Locale.ROOT)); + switch (priority) { + case NORMAL: + case HIGH: + case URGENT: + return priority; + } + throw new IllegalArgumentException( + "priority [" + priority + "] not supported for [" + FOLLOW_UP_REROUTE_PRIORITY_SETTING.getKey() + "]" + ); + } + private final RerouteService rerouteService; private final PrimaryShardBatchAllocator primaryShardBatchAllocator; private final ReplicaShardBatchAllocator replicaShardBatchAllocator; @@ -179,6 +207,8 @@ public ShardsBatchGatewayAllocator( this.replicaShardsBatchGatewayAllocatorTimeout = REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING, this::setReplicaBatchAllocatorTimeout); this.clusterManagerMetrics = clusterManagerMetrics; + setFollowUpRerouteTaskPriority(FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer(FOLLOW_UP_REROUTE_PRIORITY_SETTING, this::setFollowUpRerouteTaskPriority); } @Override @@ -308,8 +338,8 @@ public void onComplete() { logger.trace("scheduling reroute after existing shards allocator timed out for primary shards"); assert rerouteService != null; rerouteService.reroute( - "reroute after existing shards allocator timed out", - Priority.HIGH, + "reroute after existing shards allocator [P] timed out", + followUpRerouteTaskPriority, ActionListener.wrap( r -> logger.trace("reroute after existing shards allocator timed out completed"), e -> logger.debug("reroute after existing shards allocator timed out failed", e) @@ -343,8 +373,8 @@ public void onComplete() { logger.trace("scheduling reroute after existing shards allocator timed out for replica shards"); assert rerouteService != null; rerouteService.reroute( - "reroute after existing shards allocator timed out", - Priority.HIGH, + "reroute after existing shards allocator [R] timed out", + followUpRerouteTaskPriority, ActionListener.wrap( r -> logger.trace("reroute after existing shards allocator timed out completed"), e -> logger.debug("reroute after existing shards allocator timed out failed", e) @@ -920,4 +950,8 @@ protected void setPrimaryBatchAllocatorTimeout(TimeValue primaryShardsBatchGatew protected void setReplicaBatchAllocatorTimeout(TimeValue replicaShardsBatchGatewayAllocatorTimeout) { this.replicaShardsBatchGatewayAllocatorTimeout = replicaShardsBatchGatewayAllocatorTimeout; } + + protected void setFollowUpRerouteTaskPriority(Priority followUpRerouteTaskPriority) { + this.followUpRerouteTaskPriority = followUpRerouteTaskPriority; + } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/allocator/TimeBoundBalancedShardsAllocatorTests.java 
b/server/src/test/java/org/opensearch/cluster/routing/allocation/allocator/TimeBoundBalancedShardsAllocatorTests.java index 45a0bd7b18afd..8899b4ee8f68d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/allocator/TimeBoundBalancedShardsAllocatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/allocator/TimeBoundBalancedShardsAllocatorTests.java @@ -46,6 +46,7 @@ import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.ALLOCATOR_TIMEOUT_SETTING; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.FOLLOW_UP_REROUTE_PRIORITY_SETTING; public class TimeBoundBalancedShardsAllocatorTests extends OpenSearchAllocationTestCase { @@ -108,7 +109,7 @@ public void testAllUnassignedShardsAllocatedWhenNoTimeOutAndRerouteNotScheduled( listener.onResponse(clusterService.state()); } assertEquals("reroute after balanced shards allocator timed out", reason); - assertEquals(Priority.HIGH, priority); + assertEquals(Priority.NORMAL, priority); rerouteScheduled.compareAndSet(false, true); }; allocator.setRerouteService(rerouteService); @@ -143,6 +144,49 @@ public void testAllUnassignedShardsIgnoredWhenTimedOutAndRerouteScheduled() { System.nanoTime() ); AtomicBoolean rerouteScheduled = new AtomicBoolean(false); + final RerouteService rerouteService = (reason, priority, listener) -> { + if (randomBoolean()) { + listener.onFailure(new OpenSearchException("simulated")); + } else { + listener.onResponse(clusterService.state()); + } + assertEquals("reroute after balanced shards allocator timed out", reason); + assertEquals(Priority.NORMAL, priority); + rerouteScheduled.compareAndSet(false, true); + }; + allocator.setRerouteService(rerouteService); + allocator.allocate(allocation); + List initializingShards = allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING); + int node1Recoveries = allocation.routingNodes().getInitialPrimariesIncomingRecoveries(node1.getId()); + int node2Recoveries = allocation.routingNodes().getInitialPrimariesIncomingRecoveries(node2.getId()); + int node3Recoveries = allocation.routingNodes().getInitialPrimariesIncomingRecoveries(node3.getId()); + assertEquals(0, initializingShards.size()); + assertEquals(totalShardCount, allocation.routingNodes().unassigned().ignored().size()); + assertEquals(0, node1Recoveries + node2Recoveries + node3Recoveries); + assertTrue(rerouteScheduled.get()); + } + + public void testAllUnassignedShardsIgnoredWhenTimedOutAndRerouteScheduledWithHighPriority() { + int numberOfIndices = 2; + int numberOfShards = 5; + int numberOfReplicas = 1; + int totalShardCount = numberOfIndices * (numberOfShards * (numberOfReplicas + 1)); + Settings.Builder settings = Settings.builder() + .put("cluster.routing.allocation.balanced_shards_allocator.schedule_reroute.priority", "high"); + // passing 0 for timed out latch such that all shard times out + BalancedShardsAllocator allocator = new TestBalancedShardsAllocator(settings.build(), new CountDownLatch(0)); + Metadata metadata = buildMetadata(Metadata.builder(), numberOfIndices, numberOfShards, numberOfReplicas); + RoutingTable routingTable = buildRoutingTable(metadata); + setupStateAndService(metadata, routingTable); + RoutingAllocation allocation = new RoutingAllocation( + yesAllocationDeciders(), + new RoutingNodes(state, false), + state, + 
ClusterInfo.EMPTY, + null, + System.nanoTime() + ); + AtomicBoolean rerouteScheduled = new AtomicBoolean(false); final RerouteService rerouteService = (reason, priority, listener) -> { if (randomBoolean()) { listener.onFailure(new OpenSearchException("simulated")); @@ -193,7 +237,7 @@ public void testAllocatePartialPrimaryShardsUntilTimedOutAndRerouteScheduled() { listener.onResponse(clusterService.state()); } assertEquals("reroute after balanced shards allocator timed out", reason); - assertEquals(Priority.HIGH, priority); + assertEquals(Priority.NORMAL, priority); rerouteScheduled.compareAndSet(false, true); }; allocator.setRerouteService(rerouteService); @@ -237,7 +281,7 @@ public void testAllocateAllPrimaryShardsAndPartialReplicaShardsUntilTimedOutAndR listener.onResponse(clusterService.state()); } assertEquals("reroute after balanced shards allocator timed out", reason); - assertEquals(Priority.HIGH, priority); + assertEquals(Priority.NORMAL, priority); rerouteScheduled.compareAndSet(false, true); }; allocator.setRerouteService(rerouteService); @@ -284,7 +328,7 @@ public void testAllShardsMoveWhenExcludedAndTimeoutNotBreachedAndRerouteNotSched listener.onResponse(clusterService.state()); } assertEquals("reroute after balanced shards allocator timed out", reason); - assertEquals(Priority.HIGH, priority); + assertEquals(Priority.NORMAL, priority); rerouteScheduled.compareAndSet(false, true); }; allocator.setRerouteService(rerouteService); @@ -326,7 +370,7 @@ public void testNoShardsMoveWhenExcludedAndTimeoutBreachedAndRerouteScheduled() listener.onResponse(clusterService.state()); } assertEquals("reroute after balanced shards allocator timed out", reason); - assertEquals(Priority.HIGH, priority); + assertEquals(Priority.NORMAL, priority); rerouteScheduled.compareAndSet(false, true); }; allocator.setRerouteService(rerouteService); @@ -371,7 +415,7 @@ public void testPartialShardsMoveWhenExcludedAndTimeoutBreachedAndRerouteSchedul listener.onResponse(clusterService.state()); } assertEquals("reroute after balanced shards allocator timed out", reason); - assertEquals(Priority.HIGH, priority); + assertEquals(Priority.NORMAL, priority); rerouteScheduled.compareAndSet(false, true); }; allocator.setRerouteService(rerouteService); @@ -416,7 +460,7 @@ public void testClusterRebalancedWhenNotTimedOutAndRerouteNotScheduled() { listener.onResponse(clusterService.state()); } assertEquals("reroute after balanced shards allocator timed out", reason); - assertEquals(Priority.HIGH, priority); + assertEquals(Priority.NORMAL, priority); rerouteScheduled.compareAndSet(false, true); }; allocator.setRerouteService(rerouteService); @@ -462,7 +506,7 @@ public void testClusterNotRebalancedWhenTimedOutAndRerouteScheduled() { listener.onResponse(clusterService.state()); } assertEquals("reroute after balanced shards allocator timed out", reason); - assertEquals(Priority.HIGH, priority); + assertEquals(Priority.NORMAL, priority); rerouteScheduled.compareAndSet(false, true); }; allocator.setRerouteService(rerouteService); @@ -522,7 +566,7 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca listener.onResponse(clusterService.state()); } assertEquals("reroute after balanced shards allocator timed out", reason); - assertEquals(Priority.HIGH, priority); + assertEquals(Priority.NORMAL, priority); rerouteScheduled.compareAndSet(false, true); }; allocator.setRerouteService(rerouteService); @@ -561,6 +605,32 @@ public void testAllocatorTimeout() { assertEquals(-1, 
ALLOCATOR_TIMEOUT_SETTING.get(build).getMillis()); } + public void testFollowupPriorityValues() { + String settingKey = "cluster.routing.allocation.balanced_shards_allocator.schedule_reroute.priority"; + Settings build = Settings.builder().put(settingKey, "normal").build(); + assertEquals(Priority.NORMAL, FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(build)); + + build = Settings.builder().put(settingKey, "high").build(); + assertEquals(Priority.HIGH, FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(build)); + + build = Settings.builder().put(settingKey, "urgent").build(); + assertEquals(Priority.URGENT, FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(build)); + + Settings wrongPriority = Settings.builder().put(settingKey, "immediate").build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(wrongPriority) + ); + assertEquals("priority [IMMEDIATE] not supported for [" + FOLLOW_UP_REROUTE_PRIORITY_SETTING.getKey() + "]", iae.getMessage()); + + Settings wrongPriority2 = Settings.builder().put(settingKey, "random").build(); + IllegalArgumentException iae2 = expectThrows( + IllegalArgumentException.class, + () -> FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(wrongPriority2) + ); + assertEquals("No enum constant org.opensearch.common.Priority.RANDOM", iae2.getMessage()); + } + private RoutingTable buildRoutingTable(Metadata metadata) { RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); for (Map.Entry entry : metadata.getIndices().entrySet()) { diff --git a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java index ebc2e59fa5a30..7a3b5f576449c 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.ClusterInfo; @@ -53,6 +52,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import static org.opensearch.gateway.ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING; @@ -437,10 +437,51 @@ public void testCollectTimedOutShardsAndScheduleReroute_Success() throws Interru TestThreadPool threadPool = new TestThreadPool(getTestName()); ClusterService clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool); final CountDownLatch rerouteLatch = new CountDownLatch(2); + final AtomicBoolean primary = new AtomicBoolean(true); final RerouteService rerouteService = (reason, priority, listener) -> { listener.onResponse(clusterService.state()); assertThat(rerouteLatch.getCount(), greaterThanOrEqualTo(0L)); - assertEquals("reroute after existing shards allocator timed out", reason); + if (primary.get()) { + assertEquals("reroute after existing shards allocator [P] timed out", reason); + } else { + assertEquals("reroute after existing shards allocator [R] timed out", reason); + } + assertEquals(Priority.NORMAL, priority); + rerouteLatch.countDown(); + }; + CountDownLatch timedOutShardsLatch = new CountDownLatch(20); + testShardsBatchGatewayAllocator = new TestShardBatchGatewayAllocator(timedOutShardsLatch, 1000, rerouteService); + 
testShardsBatchGatewayAllocator.setPrimaryBatchAllocatorTimeout(TimeValue.ZERO); + testShardsBatchGatewayAllocator.setReplicaBatchAllocatorTimeout(TimeValue.ZERO); + testShardsBatchGatewayAllocator.setFollowUpRerouteTaskPriority(Priority.NORMAL); + BatchRunnableExecutor executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, primary.get()); + executor.run(); + assertEquals(timedOutShardsLatch.getCount(), 10); + assertEquals(1, rerouteLatch.getCount()); + primary.set(false); + executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, primary.get()); + executor.run(); + assertEquals(timedOutShardsLatch.getCount(), 0); + assertEquals(0, rerouteLatch.getCount()); // even with failure it doesn't leak any listeners + final boolean terminated = terminate(threadPool); + assert terminated; + clusterService.close(); + } + + public void testCollectTimedOutShardsAndScheduleRerouteWithHighPriority_Success() throws InterruptedException { + createIndexAndUpdateClusterState(2, 5, 2); + TestThreadPool threadPool = new TestThreadPool(getTestName()); + ClusterService clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool); + final CountDownLatch rerouteLatch = new CountDownLatch(2); + final AtomicBoolean primary = new AtomicBoolean(true); + final RerouteService rerouteService = (reason, priority, listener) -> { + listener.onResponse(clusterService.state()); + assertThat(rerouteLatch.getCount(), greaterThanOrEqualTo(0L)); + if (primary.get()) { + assertEquals("reroute after existing shards allocator [P] timed out", reason); + } else { + assertEquals("reroute after existing shards allocator [R] timed out", reason); + } assertEquals(Priority.HIGH, priority); rerouteLatch.countDown(); }; @@ -448,11 +489,13 @@ public void testCollectTimedOutShardsAndScheduleReroute_Success() throws Interru testShardsBatchGatewayAllocator = new TestShardBatchGatewayAllocator(timedOutShardsLatch, 1000, rerouteService); testShardsBatchGatewayAllocator.setPrimaryBatchAllocatorTimeout(TimeValue.ZERO); testShardsBatchGatewayAllocator.setReplicaBatchAllocatorTimeout(TimeValue.ZERO); - BatchRunnableExecutor executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, true); + testShardsBatchGatewayAllocator.setFollowUpRerouteTaskPriority(Priority.HIGH); + BatchRunnableExecutor executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, primary.get()); executor.run(); assertEquals(timedOutShardsLatch.getCount(), 10); assertEquals(1, rerouteLatch.getCount()); - executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, false); + primary.set(false); + executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, primary.get()); executor.run(); assertEquals(timedOutShardsLatch.getCount(), 0); assertEquals(0, rerouteLatch.getCount()); // even with failure it doesn't leak any listeners @@ -466,22 +509,29 @@ public void testCollectTimedOutShardsAndScheduleReroute_Failure() throws Interru TestThreadPool threadPool = new TestThreadPool(getTestName()); ClusterService clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool); final CountDownLatch rerouteLatch = new CountDownLatch(2); + final AtomicBoolean primary = new AtomicBoolean(true); final RerouteService rerouteService = (reason, priority, listener) -> { - listener.onFailure(new OpenSearchException("simulated")); + listener.onResponse(clusterService.state()); 
assertThat(rerouteLatch.getCount(), greaterThanOrEqualTo(0L)); - assertEquals("reroute after existing shards allocator timed out", reason); - assertEquals(Priority.HIGH, priority); + if (primary.get()) { + assertEquals("reroute after existing shards allocator [P] timed out", reason); + } else { + assertEquals("reroute after existing shards allocator [R] timed out", reason); + } + assertEquals(Priority.NORMAL, priority); rerouteLatch.countDown(); }; CountDownLatch timedOutShardsLatch = new CountDownLatch(20); testShardsBatchGatewayAllocator = new TestShardBatchGatewayAllocator(timedOutShardsLatch, 1000, rerouteService); testShardsBatchGatewayAllocator.setPrimaryBatchAllocatorTimeout(TimeValue.ZERO); testShardsBatchGatewayAllocator.setReplicaBatchAllocatorTimeout(TimeValue.ZERO); - BatchRunnableExecutor executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, true); + testShardsBatchGatewayAllocator.setFollowUpRerouteTaskPriority(Priority.NORMAL); + BatchRunnableExecutor executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, primary.get()); executor.run(); assertEquals(timedOutShardsLatch.getCount(), 10); assertEquals(1, rerouteLatch.getCount()); - executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, false); + primary.set(false); + executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, primary.get()); executor.run(); assertEquals(timedOutShardsLatch.getCount(), 0); assertEquals(0, rerouteLatch.getCount()); // even with failure it doesn't leak any listeners @@ -490,6 +540,35 @@ public void testCollectTimedOutShardsAndScheduleReroute_Failure() throws Interru clusterService.close(); } + public void testFollowupPriorityValues() { + String settingKey = "cluster.routing.allocation.shards_batch_gateway_allocator.schedule_reroute.priority"; + Settings build = Settings.builder().put(settingKey, "normal").build(); + assertEquals(Priority.NORMAL, ShardsBatchGatewayAllocator.FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(build)); + + build = Settings.builder().put(settingKey, "high").build(); + assertEquals(Priority.HIGH, ShardsBatchGatewayAllocator.FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(build)); + + build = Settings.builder().put(settingKey, "urgent").build(); + assertEquals(Priority.URGENT, ShardsBatchGatewayAllocator.FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(build)); + + Settings wrongPriority = Settings.builder().put(settingKey, "immediate").build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> ShardsBatchGatewayAllocator.FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(wrongPriority) + ); + assertEquals( + "priority [IMMEDIATE] not supported for [" + ShardsBatchGatewayAllocator.FOLLOW_UP_REROUTE_PRIORITY_SETTING.getKey() + "]", + iae.getMessage() + ); + + Settings wrongPriority2 = Settings.builder().put(settingKey, "random").build(); + IllegalArgumentException iae2 = expectThrows( + IllegalArgumentException.class, + () -> ShardsBatchGatewayAllocator.FOLLOW_UP_REROUTE_PRIORITY_SETTING.get(wrongPriority2) + ); + assertEquals("No enum constant org.opensearch.common.Priority.RANDOM", iae2.getMessage()); + } + private void createIndexAndUpdateClusterState(int count, int numberOfShards, int numberOfReplicas) { if (count == 0) return; Metadata.Builder metadata = Metadata.builder(); From e3d3a17dd052c363d7b85565f8ba7834962af659 Mon Sep 17 00:00:00 2001 From: Varun Bharadwaj Date: Tue, 18 Mar 2025 12:58:29 -0700 Subject: [PATCH 082/550] Support custom kafka 
consumer configs (#17594) Signed-off-by: Varun Bharadwaj --- .../plugin/kafka/KafkaIngestionBaseIT.java | 5 ++++- .../plugin/kafka/RemoteStoreKafkaIT.java | 1 - .../plugin/kafka/KafkaPartitionConsumer.java | 8 ++++---- .../plugin/kafka/KafkaSourceConfig.java | 15 +++++++++++++-- .../plugin/kafka/KafkaSourceConfigTests.java | 6 +++++- 5 files changed, 26 insertions(+), 9 deletions(-) diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java index e3250fc806679..cfc9b4a766fcd 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java @@ -34,7 +34,7 @@ import org.testcontainers.utility.DockerImageName; /** - * Base test class for Kafka ingestion tests + * Base test class for Kafka ingestion tests. */ @ThreadLeakFilters(filters = TestContainerThreadLeakFilter.class) public class KafkaIngestionBaseIT extends OpenSearchIntegTestCase { @@ -135,6 +135,9 @@ protected void createIndexWithDefaultSettings(int numShards, int numReplicas) { .put("ingestion_source.param.topic", topicName) .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) .put("index.replication.type", "SEGMENT") + // set custom kafka consumer properties + .put("ingestion_source.param.fetch.min.bytes", 30000) + .put("ingestion_source.param.enable.auto.commit", false) .build(), "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" ); diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java index ecd7b72bc8349..ad4bc2bf77071 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java @@ -48,7 +48,6 @@ public void testSegmentReplicationWithRemoteStore() throws Exception { internalCluster().startClusterManagerOnlyNode(); final String nodeA = internalCluster().startDataOnlyNode(); createIndexWithDefaultSettings(1, 1); - ensureYellowAndNoInitializingShards(indexName); final String nodeB = internalCluster().startDataOnlyNode(); ensureGreen(indexName); diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java index c749a887a2ccb..15f20af6d6275 100644 --- a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java +++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java @@ -9,7 +9,6 @@ package org.opensearch.plugin.kafka; import org.apache.kafka.clients.consumer.Consumer; -import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; @@ -99,9 +98,10 @@ protected static Consumer createConsumer(String clientId, KafkaS Properties consumerProp = new Properties(); consumerProp.put("bootstrap.servers", config.getBootstrapServers()); 
consumerProp.put("client.id", clientId); - if (config.getAutoOffsetResetConfig() != null && !config.getAutoOffsetResetConfig().isEmpty()) { - consumerProp.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, config.getAutoOffsetResetConfig()); - } + + logger.info("Kafka consumer properties for topic {}: {}", config.getTopic(), config.getConsumerConfigurations()); + consumerProp.putAll(config.getConsumerConfigurations()); + // TODO: why Class org.apache.kafka.common.serialization.StringDeserializer could not be found if set the deserializer as prop? // consumerProp.put("key.deserializer", // "org.apache.kafka.common.serialization.StringDeserializer"); diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java index cbb8530963ec8..6eae3093881f2 100644 --- a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java +++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java @@ -10,6 +10,7 @@ import org.opensearch.core.util.ConfigurationUtils; +import java.util.HashMap; import java.util.Map; /** @@ -18,21 +19,27 @@ public class KafkaSourceConfig { private final String PROP_TOPIC = "topic"; private final String PROP_BOOTSTRAP_SERVERS = "bootstrap_servers"; - // TODO: support pass any generic kafka configs private final String PROP_AUTO_OFFSET_RESET = "auto.offset.reset"; private final String topic; private final String bootstrapServers; private final String autoOffsetResetConfig; + private final Map consumerConfigsMap; + /** - * Constructor + * Extracts and look for required and optional kafka consumer configurations. * @param params the configuration parameters */ public KafkaSourceConfig(Map params) { this.topic = ConfigurationUtils.readStringProperty(params, PROP_TOPIC); this.bootstrapServers = ConfigurationUtils.readStringProperty(params, PROP_BOOTSTRAP_SERVERS); this.autoOffsetResetConfig = ConfigurationUtils.readOptionalStringProperty(params, PROP_AUTO_OFFSET_RESET); + this.consumerConfigsMap = new HashMap<>(params); + + // remove above configurations + consumerConfigsMap.remove(PROP_TOPIC); + consumerConfigsMap.remove(PROP_BOOTSTRAP_SERVERS); } /** @@ -60,4 +67,8 @@ public String getBootstrapServers() { public String getAutoOffsetResetConfig() { return autoOffsetResetConfig; } + + public Map getConsumerConfigurations() { + return consumerConfigsMap; + } } diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaSourceConfigTests.java b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaSourceConfigTests.java index aa4ddb94f23fc..2e554b7c80b79 100644 --- a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaSourceConfigTests.java +++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaSourceConfigTests.java @@ -16,10 +16,12 @@ public class KafkaSourceConfigTests extends OpenSearchTestCase { - public void testConstructorAndGetters() { + public void testKafkaSourceConfig() { Map params = new HashMap<>(); params.put("topic", "topic"); params.put("bootstrap_servers", "bootstrap"); + params.put("fetch.min.bytes", 30000); + params.put("enable.auto.commit", false); KafkaSourceConfig config = new KafkaSourceConfig(params); @@ -29,5 +31,7 @@ public void testConstructorAndGetters() { "bootstrap", config.getBootstrapServers() ); + Assert.assertEquals("Incorrect fetch.min.bytes", 30000, 
config.getConsumerConfigurations().get("fetch.min.bytes")); + Assert.assertEquals("Incorrect enable.auto.commit", false, config.getConsumerConfigurations().get("enable.auto.commit")); } } From dcad6b8ff5e2f356d71019810ce7ced3212f8769 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 16:42:39 -0400 Subject: [PATCH 083/550] Bump org.jruby.joni:joni from 2.2.3 to 2.2.5 in /libs/grok (#17608) * Bump org.jruby.joni:joni from 2.2.3 to 2.2.5 in /libs/grok Bumps [org.jruby.joni:joni](https://github.com/jruby/joni) from 2.2.3 to 2.2.5. - [Commits](https://github.com/jruby/joni/compare/joni-2.2.3...joni-2.2.5) --- updated-dependencies: - dependency-name: org.jruby.joni:joni dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + libs/grok/build.gradle | 2 +- libs/grok/licenses/joni-2.2.3.jar.sha1 | 1 - libs/grok/licenses/joni-2.2.5.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 libs/grok/licenses/joni-2.2.3.jar.sha1 create mode 100644 libs/grok/licenses/joni-2.2.5.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d30e975aa497..6f855d6bf1327 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) +- Bump `org.jruby.joni:joni` from 2.2.3 to 2.2.5 ([#17608](https://github.com/opensearch-project/OpenSearch/pull/17608)) ### Changed diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index 3bcbb59aece56..97c68177a1164 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -29,7 +29,7 @@ */ dependencies { - api 'org.jruby.joni:joni:2.2.3' + api 'org.jruby.joni:joni:2.2.5' // joni dependencies: api 'org.jruby.jcodings:jcodings:1.0.63' diff --git a/libs/grok/licenses/joni-2.2.3.jar.sha1 b/libs/grok/licenses/joni-2.2.3.jar.sha1 deleted file mode 100644 index fcc7a23f0d5aa..0000000000000 --- a/libs/grok/licenses/joni-2.2.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a26324720f0053e151c01fe4f407d0a84bafb2dc \ No newline at end of file diff --git a/libs/grok/licenses/joni-2.2.5.jar.sha1 b/libs/grok/licenses/joni-2.2.5.jar.sha1 new file mode 100644 index 0000000000000..060581a9b8a28 --- /dev/null +++ b/libs/grok/licenses/joni-2.2.5.jar.sha1 @@ -0,0 +1 @@ +4ebafe67efa7395678a34d07e7585bed5ef0cc72 \ No newline at end of file From 14d740fa1fdfa33885c3636ffd5e4db3912180b6 Mon Sep 17 00:00:00 2001 From: Vinay Krishna Pudyodu Date: Tue, 18 Mar 2025 16:13:23 -0700 Subject: [PATCH 084/550] Introduce a new search node role to hold search only shards (#17620) * Introduce a new search node role Signed-off-by: Vinay Krishna Pudyodu * Add changelog Signed-off-by: Vinay Krishna Pudyodu * fixed PR comments Signed-off-by: Vinay Krishna Pudyodu * rename search to warm in FsProbe test Signed-off-by: Vinay Krishna Pudyodu * fixed ClusterStatsIT tests Signed-off-by: Vinay Krishna Pudyodu * added ClusterStatsIT test for search node role Signed-off-by: Vinay Krishna Pudyodu --------- Signed-off-by: Vinay Krishna Pudyodu --- CHANGELOG.md | 1 + 
.../admin/cluster/stats/ClusterStatsIT.java | 39 +++++++++++++++---- .../cluster/node/DiscoveryNode.java | 9 +++++ .../cluster/node/DiscoveryNodeRole.java | 30 +++++++++++++- .../cluster/node/DiscoveryNodeTests.java | 18 +++++++++ .../opensearch/monitor/fs/FsProbeTests.java | 4 +- .../node/NodeRoleSettingsTests.java | 9 +++++ .../opensearch/test/InternalTestCluster.java | 17 ++++++++ .../java/org/opensearch/test/NodeRoles.java | 15 +++++++ 9 files changed, 132 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f855d6bf1327..5a58078644e6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added - Change priority for scheduling reroute during timeout([#16445](https://github.com/opensearch-project/OpenSearch/pull/16445)) - Renaming the node role search to warm ([#17573](https://github.com/opensearch-project/OpenSearch/pull/17573)) +- Introduce a new search node role to hold search only shards ([#17620](https://github.com/opensearch-project/OpenSearch/pull/17620)) ### Dependencies - Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index 4dd5e7b74c96d..e3e151fdc5403 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -90,7 +90,7 @@ private void waitForNodes(int numNodes) { public void testNodeCounts() { int total = 1; internalCluster().startNode(); - Map expectedCounts = getExpectedCounts(1, 1, 1, 1, 1, 0, 0); + Map expectedCounts = getExpectedCounts(1, 1, 1, 1, 1, 0, 0, 0); int numNodes = randomIntBetween(1, 5); ClusterStatsResponse response = client().admin() @@ -159,7 +159,7 @@ public void testNodeCountsWithDeprecatedMasterRole() throws ExecutionException, internalCluster().startNode(settings); waitForNodes(total); - Map expectedCounts = getExpectedCounts(0, 1, 1, 0, 0, 0, 0); + Map expectedCounts = getExpectedCounts(0, 1, 1, 0, 0, 0, 0, 0); Client client = client(); ClusterStatsResponse response = client.admin() @@ -484,7 +484,7 @@ public void testNodeRolesWithMasterLegacySettings() throws ExecutionException, I internalCluster().startNodes(legacyMasterSettings); waitForNodes(total); - Map expectedCounts = getExpectedCounts(0, 1, 1, 0, 1, 0, 0); + Map expectedCounts = getExpectedCounts(0, 1, 1, 0, 1, 0, 0, 0); Client client = client(); ClusterStatsResponse clusterStatsResponse = client.admin() @@ -518,7 +518,7 @@ public void testNodeRolesWithClusterManagerRole() throws ExecutionException, Int internalCluster().startNodes(clusterManagerNodeRoleSettings); waitForNodes(total); - Map expectedCounts = getExpectedCounts(0, 1, 1, 0, 1, 0, 0); + Map expectedCounts = getExpectedCounts(0, 1, 1, 0, 1, 0, 0, 0); Client client = client(); ClusterStatsResponse clusterStatsResponse = client.admin() @@ -546,7 +546,7 @@ public void testNodeRolesWithSeedDataNodeLegacySettings() throws ExecutionExcept internalCluster().startNodes(legacySeedDataNodeSettings); waitForNodes(total); - Map expectedRoleCounts = getExpectedCounts(1, 1, 1, 0, 1, 0, 0); + Map expectedRoleCounts = getExpectedCounts(1, 1, 1, 0, 1, 0, 0, 0); Client client = client(); 
ClusterStatsResponse clusterStatsResponse = client.admin() @@ -577,7 +577,7 @@ public void testNodeRolesWithDataNodeLegacySettings() throws ExecutionException, internalCluster().startNodes(legacyDataNodeSettings); waitForNodes(total); - Map expectedRoleCounts = getExpectedCounts(1, 1, 1, 0, 1, 0, 0); + Map expectedRoleCounts = getExpectedCounts(1, 1, 1, 0, 1, 0, 0, 0); Client client = client(); ClusterStatsResponse clusterStatsResponse = client.admin() @@ -594,6 +594,29 @@ public void testNodeRolesWithDataNodeLegacySettings() throws ExecutionException, assertEquals(expectedNodesRoles, Set.of(getNodeRoles(client, 0), getNodeRoles(client, 1))); } + public void testNodeRolesWithSearchNode() throws ExecutionException, InterruptedException { + int total = 2; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startSearchOnlyNode(); + waitForNodes(total); + + Map expectedRoleCounts = getExpectedCounts(0, 1, 1, 0, 0, 0, 1, 0); + + Client client = client(); + ClusterStatsResponse clusterStatsResponse = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); + assertCounts(clusterStatsResponse.getNodesStats().getCounts(), total, expectedRoleCounts); + + Set> expectedNodesRoles = Set.of( + Set.of(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName()), + Set.of(DiscoveryNodeRole.SEARCH_ROLE.roleName()) + ); + assertEquals(expectedNodesRoles, Set.of(getNodeRoles(client, 0), getNodeRoles(client, 1))); + } + public void testClusterStatsWithNodeMetricsFilter() { internalCluster().startNode(); ensureGreen(); @@ -887,6 +910,7 @@ private Map getExpectedCounts( int clusterManagerRoleCount, int ingestRoleCount, int remoteClusterClientRoleCount, + int warmRoleCount, int searchRoleCount, int coordinatingOnlyCount ) { @@ -896,7 +920,8 @@ private Map getExpectedCounts( expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), clusterManagerRoleCount); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), ingestRoleCount); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), remoteClusterClientRoleCount); - expectedCounts.put(DiscoveryNodeRole.WARM_ROLE.roleName(), searchRoleCount); + expectedCounts.put(DiscoveryNodeRole.WARM_ROLE.roleName(), warmRoleCount); + expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), searchRoleCount); expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, coordinatingOnlyCount); return expectedCounts; } diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 9869bfd3fd4e6..eceb076dfc926 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -488,6 +488,15 @@ public boolean isWarmNode() { return roles.contains(DiscoveryNodeRole.WARM_ROLE); } + /** + * Returns whether the node is dedicated to host search replicas. + * + * @return true if the node contains a search role, false otherwise + */ + public boolean isSearchNode() { + return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); + } + /** * Returns whether the node is a remote store node. 
* diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 90aa4b6f389d1..62fb93eb4c41b 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -310,11 +310,39 @@ public Setting legacySetting() { }; + /** + * Represents the role for a search node, which is dedicated to host search replicas. + */ + public static final DiscoveryNodeRole SEARCH_ROLE = new DiscoveryNodeRole("search", "s", true) { + + @Override + public Setting legacySetting() { + // search role is added in 2.4 so doesn't need to configure legacy setting + return null; + } + + @Override + public void validateRole(List roles) { + for (DiscoveryNodeRole role : roles) { + if (role.equals(DiscoveryNodeRole.SEARCH_ROLE) == false) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "%s role cannot be combined with any other role on a node.", + DiscoveryNodeRole.SEARCH_ROLE.roleName() + ) + ); + } + } + } + + }; + /** * The built-in node roles. */ public static SortedSet BUILT_IN_ROLES = Collections.unmodifiableSortedSet( - new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, WARM_ROLE)) + new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, WARM_ROLE, SEARCH_ROLE)) ); /** diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 40fcb648bea7a..ded32e9b36cb4 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -242,6 +242,14 @@ public void testDiscoveryNodeIsWarmUnset() { runTestDiscoveryNodeIsWarm(nonWarmNode(), false); } + public void testDiscoveryNodeIsSearchSet() { + runTestDiscoveryNodeIsSearch(NodeRoles.searchOnlyNode(), true); + } + + public void testDiscoveryNodeIsSearchUnset() { + runTestDiscoveryNodeIsSearch(NodeRoles.nonSearchNode(), false); + } + // Added in 2.0 temporarily, validate the MASTER_ROLE is in the list of known roles. // MASTER_ROLE was removed from BUILT_IN_ROLES and is imported by setDeprecatedMasterRole(), // as a workaround for making the new CLUSTER_MANAGER_ROLE has got the same abbreviation 'm'. 
@@ -271,6 +279,16 @@ private void runTestDiscoveryNodeIsWarm(final Settings settings, final boolean e } } + private void runTestDiscoveryNodeIsSearch(final Settings settings, final boolean expected) { + final DiscoveryNode node = DiscoveryNode.createLocal(settings, new TransportAddress(TransportAddress.META_ADDRESS, 9200), "node"); + assertThat(node.isSearchNode(), equalTo(expected)); + if (expected) { + assertThat(node.getRoles(), hasItem(DiscoveryNodeRole.SEARCH_ROLE)); + } else { + assertThat(node.getRoles(), not(hasItem(DiscoveryNodeRole.SEARCH_ROLE))); + } + } + public void testGetRoleFromRoleNameIsCaseInsensitive() { String dataRoleName = "DATA"; DiscoveryNodeRole dataNodeRole = DiscoveryNode.getRoleFromRoleName(dataRoleName); diff --git a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java index e2e09d5ce63fe..c9ac3a8996f58 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java @@ -126,7 +126,7 @@ public void testFsInfo() throws IOException { } public void testFsCacheInfo() throws IOException { - Settings settings = Settings.builder().put("node.roles", "search").build(); + Settings settings = Settings.builder().put("node.roles", "warm").build(); try (NodeEnvironment env = newNodeEnvironment(settings)) { ByteSizeValue gbByteSizeValue = new ByteSizeValue(1, ByteSizeUnit.GB); env.fileCacheNodePath().fileCacheReservedSize = gbByteSizeValue; @@ -164,7 +164,7 @@ public void testFsCacheInfo() throws IOException { } public void testFsInfoWhenFileCacheOccupied() throws IOException { - Settings settings = Settings.builder().putList("node.roles", "search", "data").build(); + Settings settings = Settings.builder().putList("node.roles", "warm", "data").build(); try (NodeEnvironment env = newNodeEnvironment(settings)) { // Use the total space as reserved space to simulate the situation where the cache space is occupied final long totalSpace = adjustForHugeFilesystems(env.fileCacheNodePath().fileStore.getTotalSpace()); diff --git a/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java b/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java index b2bb6897fe164..e3e26739730a4 100644 --- a/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java +++ b/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java @@ -44,6 +44,15 @@ public void testClusterManagerAndDataNodeRoles() { ); } + /** + * Validate search role cannot coexist with any other role on a node. + */ + public void testSearchRoleCannotCoExistWithAnyOtherRole() { + Settings roleSettings = Settings.builder().put(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), "search, test_role").build(); + Exception exception = expectThrows(IllegalArgumentException.class, () -> NodeRoleSettings.NODE_ROLES_SETTING.get(roleSettings)); + assertThat(exception.getMessage(), containsString("search role cannot be combined with any other role on a node.")); + } + /** * Validate setting master role will result a deprecation message. * Remove the test after removing MASTER_ROLE. 
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index ed6f1404e4fd2..241a919304fec 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -166,6 +166,7 @@ import static org.opensearch.test.NodeRoles.onlyRole; import static org.opensearch.test.NodeRoles.onlyRoles; import static org.opensearch.test.NodeRoles.removeRoles; +import static org.opensearch.test.NodeRoles.searchOnlyNode; import static org.opensearch.test.OpenSearchTestCase.assertBusy; import static org.opensearch.test.OpenSearchTestCase.randomBoolean; import static org.opensearch.test.OpenSearchTestCase.randomFrom; @@ -2318,6 +2319,22 @@ public List startDataAndWarmNodes(int numNodes, Settings settings) { return startNodes(numNodes, Settings.builder().put(onlyRoles(settings, warmAndDataRoles)).build()); } + public List startSearchOnlyNodes(int numNodes) { + return startSearchOnlyNodes(numNodes, Settings.EMPTY); + } + + public List startSearchOnlyNodes(int numNodes, Settings settings) { + return startNodes(numNodes, Settings.builder().put(searchOnlyNode(settings)).build()); + } + + public String startSearchOnlyNode() { + return startSearchOnlyNode(Settings.EMPTY); + } + + public String startSearchOnlyNode(Settings settings) { + return startNode(Settings.builder().put(settings).put(searchOnlyNode(settings)).build()); + } + public List startDataOnlyNodes(int numNodes) { return startDataOnlyNodes(numNodes, Settings.EMPTY); } diff --git a/test/framework/src/main/java/org/opensearch/test/NodeRoles.java b/test/framework/src/main/java/org/opensearch/test/NodeRoles.java index 9c944e4aee62e..4766db875d3fb 100644 --- a/test/framework/src/main/java/org/opensearch/test/NodeRoles.java +++ b/test/framework/src/main/java/org/opensearch/test/NodeRoles.java @@ -224,4 +224,19 @@ public static Settings nonWarmNode(final Settings settings) { return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.WARM_ROLE)); } + public static Settings searchOnlyNode() { + return searchOnlyNode(Settings.EMPTY); + } + + public static Settings searchOnlyNode(final Settings settings) { + return onlyRole(settings, DiscoveryNodeRole.SEARCH_ROLE); + } + + public static Settings nonSearchNode() { + return nonSearchNode(Settings.EMPTY); + } + + public static Settings nonSearchNode(final Settings settings) { + return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE)); + } } From d9a927464ec62634d520bc32bbd34104c4f4c097 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Tue, 18 Mar 2025 16:29:28 -0700 Subject: [PATCH 085/550] Update changelog for new branching strategy (#17618) * Update changelog for new branching strategy The two file approach is no longer needed now that we are not maintaining the main branch as the next major version. This updates the changelog and the verifier workflow accordingly. 
Signed-off-by: Andrew Ross * Remove documentation about two changelog files Signed-off-by: Andrew Ross * Update changelog_verifier.yml --------- Signed-off-by: Andrew Ross Co-authored-by: Peter Zhu --- .github/workflows/changelog_verifier.yml | 37 ------------------------ CHANGELOG-3.0.md | 21 -------------- CHANGELOG.md | 4 +-- CONTRIBUTING.md | 9 ------ 4 files changed, 2 insertions(+), 69 deletions(-) delete mode 100644 CHANGELOG-3.0.md diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index cd0415119282c..7bd28e1a9e18f 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -14,42 +14,5 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} - uses: dangoslen/changelog-enforcer@v3 - id: verify-changelog-3x with: skipLabels: "autocut, skip-changelog" - changeLogPath: 'CHANGELOG-3.0.md' - continue-on-error: true - - uses: dangoslen/changelog-enforcer@v3 - id: verify-changelog - with: - skipLabels: "autocut, skip-changelog" - changeLogPath: 'CHANGELOG.md' - continue-on-error: true - - run: | - # The check was possibly skipped leading to success for both the jobs - has_backport_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), 'backport')}} - has_breaking_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), '>breaking')}} - if [[ $has_breaking_label == true && $has_backport_label == true ]]; then - echo "error: Please make sure that the PR does not have a backport label associated with it when making breaking changes" - exit 1 - fi - - if [[ ${{ steps.verify-changelog-3x.outcome }} == 'success' && ${{ steps.verify-changelog.outcome }} == 'success' ]]; then - exit 0 - fi - - if [[ ${{ steps.verify-changelog-3x.outcome }} == 'failure' && ${{ steps.verify-changelog.outcome }} == 'failure' ]]; then - echo "error: Please ensure a changelog entry exists in CHANGELOG.md or CHANGELOG-3.0.md" - exit 1 - fi - - # Concatenates the labels and checks if the string contains "backport" - if [[ ${{ steps.verify-changelog.outcome }} == 'success' && $has_backport_label == false ]]; then - echo "error: Please make sure that the PR has a backport label associated with it when making an entry to the CHANGELOG.md file" - exit 1 - fi - - if [[ ${{ steps.verify-changelog-3x.outcome }} == 'success' && $has_backport_label == true ]]; then - echo "error: Please make sure that the PR does not have a backport label associated with it when making an entry to the CHANGELOG-3.0.md file" - exit 1 - fi diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md deleted file mode 100644 index f301c4669de96..0000000000000 --- a/CHANGELOG-3.0.md +++ /dev/null @@ -1,21 +0,0 @@ -# CHANGELOG -All notable changes to this project are documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). See the [CONTRIBUTING guide](./CONTRIBUTING.md#Changelog) for instructions on how to add changelog entries. - -## [Unreleased 3.0] -### Added - -### Dependencies - -### Changed - -### Deprecated - -### Removed - -### Fixed - -### Security - -[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a58078644e6a..ee6464aa15c03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ All notable changes to this project are documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). See the [CONTRIBUTING guide](./CONTRIBUTING.md#Changelog) for instructions on how to add changelog entries. -## [Unreleased 2.x] +## [Unreleased 3.x] ### Added - Change priority for scheduling reroute during timeout([#16445](https://github.com/opensearch-project/OpenSearch/pull/16445)) - Renaming the node role search to warm ([#17573](https://github.com/opensearch-project/OpenSearch/pull/17573)) @@ -23,4 +23,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.19...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/f58d846f...main diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0ec0abe535dd0..56b6da1f989c3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -146,15 +146,6 @@ Adding in the change is two step process: 1. Add your changes to the corresponding section within the CHANGELOG file with dummy pull request information, publish the PR 2. Update the entry for your change in [`CHANGELOG.md`](CHANGELOG.md) and make sure that you reference the pull request there. -### Where should I put my CHANGELOG entry? -Please review the [branching strategy](https://github.com/opensearch-project/.github/blob/main/RELEASING.md#opensearch-branching) document. The changelog on the `main` branch will contain **two files**: `CHANGELOG.md` which corresponds to unreleased changes intended for the _next minor_ release and `CHANGELOG-3.0.md` which correspond to unreleased changes intended for the _next major_ release. Your entry should go into file corresponding to the version it is intended to be released in. In practice, most changes to `main` will be backported to the next minor release so most entries will be in the `CHANGELOG.md` file. - -The following examples assume the _next major_ release on main is 3.0, then _next minor_ release is 2.5, and the _current_ release is 2.4. - -- **Add a new feature to release in next minor:** Add a changelog entry to `[Unreleased 2.x]` in CHANGELOG.md on main, then backport to 2.x (including the changelog entry). -- **Introduce a breaking API change to release in next major:** Add a changelog entry to `[Unreleased 3.0]` to CHANGELOG-3.0.md on main, do not backport. -- **Upgrade a dependency to fix a CVE:** Add a changelog entry to `[Unreleased 2.x]` on main, then backport to 2.x (including the changelog entry), then backport to 2.4 and ensure the changelog entry is added to `[Unreleased 2.4.1]`. - ## Review Process We deeply appreciate everyone who takes the time to make a contribution. We will review all contributions as quickly as possible. As a reminder, [opening an issue](https://github.com/opensearch-project/OpenSearch/issues/new/choose) discussing your change before you make it is the best way to smooth the PR process. This will prevent a rejection because someone else is already working on the problem, or because the solution is incompatible with the architectural direction. 
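Patch 086 below removes the single-element-array idiom (SnapshotInfo[] / Boolean[]) used to pass results from the snapshot thread back to the test thread and replaces it with AtomicReference and AtomicBoolean holders, so the values written inside the worker thread are published safely. A minimal, self-contained sketch of that pattern, with hypothetical names not taken from the patch:

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicReference;

    public class ResultCaptureSketch {
        public static void main(String[] args) throws InterruptedException {
            // Atomic holders replace mutable single-element arrays: the write in the
            // worker thread is visible to the reading thread after join().
            AtomicReference<String> result = new AtomicReference<>();
            AtomicBoolean failed = new AtomicBoolean(false);

            Thread worker = new Thread(() -> {
                try {
                    result.set("snapshot-completed");
                } catch (Exception e) {
                    failed.set(true);
                }
            });
            worker.start();
            worker.join();

            System.out.println(failed.get() ? "operation failed" : "result: " + result.get());
        }
    }

The real change additionally keys its repository-data assertions on the failure flag, as shown in the diff that follows.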
From 007600e5069d022b9c2a0203d86d99aaf8e7e056 Mon Sep 17 00:00:00 2001 From: Iwan Igonin <83668556+beanuwave@users.noreply.github.com> Date: Wed, 19 Mar 2025 18:31:04 +0100 Subject: [PATCH 086/550] Fix flakiness of RemoteRestoreSnapshotIT.testClusterManagerFailoverDuringSnapshotCreation (#17589) Signed-off-by: Igonin Co-authored-by: Igonin Co-authored-by: Benny Goerzig Co-authored-by: Karsten Schnitter Co-authored-by: Kai Sternad --- .../remotestore/RemoteRestoreSnapshotIT.java | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 9315adf8bcd41..7b0dc19a92807 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -68,7 +68,9 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -1448,9 +1450,8 @@ public void testClusterManagerFailoverDuringSnapshotCreation() throws Exception ensureStableCluster(4, internalCluster().getClusterManagerName()); - final SnapshotInfo[] snapshotInfo = new SnapshotInfo[1]; - final Boolean[] snapshotFailed = new Boolean[1]; - snapshotFailed[0] = false; + final AtomicReference snapshotInfoRef = new AtomicReference<>(); + final AtomicBoolean snapshotFailed = new AtomicBoolean(false); Thread snapshotThread = new Thread(() -> { try { // Start snapshot creation @@ -1459,10 +1460,10 @@ public void testClusterManagerFailoverDuringSnapshotCreation() throws Exception .prepareCreateSnapshot(snapshotRepoName, snapshotName1) .setWaitForCompletion(true) .get(); - snapshotInfo[0] = createSnapshotResponse.getSnapshotInfo(); + snapshotInfoRef.set(createSnapshotResponse.getSnapshotInfo()); } catch (Exception e) { - snapshotFailed[0] = true; + snapshotFailed.set(true); } }); snapshotThread.start(); @@ -1482,10 +1483,11 @@ public void testClusterManagerFailoverDuringSnapshotCreation() throws Exception repository.getRepositoryData(repositoryDataPlainActionFuture); RepositoryData repositoryData = repositoryDataPlainActionFuture.get(); - if (snapshotFailed[0]) { - assertFalse(repositoryData.getSnapshotIds().contains(snapshotInfo[0].snapshotId())); + SnapshotInfo snapshotInfo = snapshotInfoRef.get(); + if (snapshotFailed.get()) { + assertTrue(repositoryData.getSnapshotIds().isEmpty()); } else { - assertTrue(repositoryData.getSnapshotIds().contains(snapshotInfo[0].snapshotId())); + assertTrue(repositoryData.getSnapshotIds().contains(snapshotInfo.snapshotId())); } } From d2d5d5f9913b18ad7b7506a881227238ccdcee62 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 14:08:29 -0400 Subject: [PATCH 087/550] Bump dangoslen/dependabot-changelog-helper from 3 to 4 (#17498) * Bump dangoslen/dependabot-changelog-helper from 3 to 4 Bumps [dangoslen/dependabot-changelog-helper](https://github.com/dangoslen/dependabot-changelog-helper) from 3 to 4. 
- [Release notes](https://github.com/dangoslen/dependabot-changelog-helper/releases) - [Changelog](https://github.com/dangoslen/dependabot-changelog-helper/blob/main/CHANGELOG.md) - [Commits](https://github.com/dangoslen/dependabot-changelog-helper/compare/v3...v4) --- updated-dependencies: - dependency-name: dangoslen/dependabot-changelog-helper dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/dependabot_pr.yml | 2 +- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index 71ca74050ccfb..5f5f15ca3ef72 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -56,7 +56,7 @@ jobs: commit_options: '--signoff' - name: Update the changelog - uses: dangoslen/dependabot-changelog-helper@v3 + uses: dangoslen/dependabot-changelog-helper@v4 with: version: 'Unreleased 2.x' diff --git a/CHANGELOG.md b/CHANGELOG.md index ee6464aa15c03..f812df3f4ed10 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) - Bump `org.jruby.joni:joni` from 2.2.3 to 2.2.5 ([#17608](https://github.com/opensearch-project/OpenSearch/pull/17608)) +- Bump `dangoslen/dependabot-changelog-helper` from 3 to 4 ([#17498](https://github.com/opensearch-project/OpenSearch/pull/17498)) ### Changed From 73669fe2d47ac4299b4667a37c422ca62e4893d3 Mon Sep 17 00:00:00 2001 From: Peter Zhu Date: Wed, 19 Mar 2025 15:15:32 -0400 Subject: [PATCH 088/550] Update beta1 qualifier for 3.0.0 version (#17621) Signed-off-by: Peter Zhu --- .github/workflows/benchmark-pull-request.yml | 4 ++-- .github/workflows/publish-maven-snapshots.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index 850a3310cbf6c..b1884c6156f08 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -22,7 +22,7 @@ jobs: echo "PR_NUMBER=${{ github.event.issue.number }}" >> $GITHUB_ENV echo "REPOSITORY=${{ github.event.repository.full_name }}" >> $GITHUB_ENV OPENSEARCH_VERSION=$(awk -F '=' '/^opensearch[[:space:]]*=/ {gsub(/[[:space:]]/, "", $2); print $2}' buildSrc/version.properties) - echo "OPENSEARCH_VERSION=$OPENSEARCH_VERSION-alpha1" >> $GITHUB_ENV + echo "OPENSEARCH_VERSION=$OPENSEARCH_VERSION-beta1" >> $GITHUB_ENV major_version=$(echo $OPENSEARCH_VERSION | cut -d'.' 
-f1) echo "OPENSEARCH_MAJOR_VERSION=$major_version" >> $GITHUB_ENV echo "USER_TAGS=pull_request_number:${{ github.event.issue.number }},repository:OpenSearch" >> $GITHUB_ENV @@ -147,7 +147,7 @@ jobs: distribution: 'temurin' - name: Build and Assemble OpenSearch from PR run: | - ./gradlew :distribution:archives:linux-tar:assemble -Dbuild.snapshot=false -Dbuild.version_qualifier=alpha1 + ./gradlew :distribution:archives:linux-tar:assemble -Dbuild.snapshot=false -Dbuild.version_qualifier=beta1 - name: Configure AWS credentials uses: aws-actions/configure-aws-credentials@v4 with: diff --git a/.github/workflows/publish-maven-snapshots.yml b/.github/workflows/publish-maven-snapshots.yml index 14642f251f25b..56b9c00338fe3 100644 --- a/.github/workflows/publish-maven-snapshots.yml +++ b/.github/workflows/publish-maven-snapshots.yml @@ -37,4 +37,4 @@ jobs: export SONATYPE_PASSWORD=$(aws secretsmanager get-secret-value --secret-id maven-snapshots-password --query SecretString --output text) echo "::add-mask::$SONATYPE_USERNAME" echo "::add-mask::$SONATYPE_PASSWORD" - ./gradlew publishNebulaPublicationToSnapshotsRepository -Dbuild.version_qualifier=alpha1 + ./gradlew publishNebulaPublicationToSnapshotsRepository -Dbuild.version_qualifier=beta1 From af5835f561ddf383bd6869fa4c6703a3cc2d2433 Mon Sep 17 00:00:00 2001 From: Hrithik Shukla <33272221+hrithikshukla@users.noreply.github.com> Date: Wed, 19 Mar 2025 17:24:24 -0400 Subject: [PATCH 089/550] Fix how bytes are displayed on _cat/recovery (#17598) * Fix how bytes are displayed on _cat/recovery Signed-off-by: Hrithik Shukla * Fix tests Signed-off-by: Hrithik Shukla * Update changelog Signed-off-by: Hrithik Shukla --------- Signed-off-by: Hrithik Shukla Co-authored-by: Hrithik Shukla --- CHANGELOG.md | 1 + .../test/cat.recovery/10_basic.yml | 23 +++++++++++-------- .../action/cat/RestCatRecoveryAction.java | 7 +++--- .../action/cat/RestRecoveryActionTests.java | 7 +++--- 4 files changed, 22 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f812df3f4ed10..37eef849fe59b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed ### Fixed +- Fix bytes parameter on `_cat/recovery` ([#17598](https://github.com/opensearch-project/OpenSearch/pull/17598)) ### Security diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yml index ef1272322e9af..942d90be25bb4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yml @@ -1,5 +1,8 @@ --- "Test cat recovery output": + - skip: + version: " - 2.99.99" + reason: Output format changed in 3.0 - do: cat.recovery: {} @@ -35,10 +38,10 @@ \d+ \s+ # files_recovered \d+\.\d+% \s+ # files_percent \d+ \s+ # files_total - \d+ \s+ # bytes - \d+ \s+ # bytes_recovered + (\d+|\d+[.]\d+)(kb|b) \s+ # bytes + (\d+|\d+[.]\d+)(kb|b) \s+ # bytes_recovered \d+\.\d+% \s+ # bytes_percent - \d+ \s+ # bytes_total + (\d+|\d+[.]\d+)(kb|b) \s+ # bytes_total -?\d+ \s+ # translog_ops \d+ \s+ # translog_ops_recovered -?\d+\.\d+% # translog_ops_percent @@ -56,7 +59,7 @@ ( \d \s+ # shard ((\S+\s?){1,10})\s+ # source_node - \d+ # bytes + (\d+|\d+[.]\d+)(kb|b) # bytes \n )+ $/ @@ -71,7 +74,7 @@ ( \d \s+ # shard ((\S+\s?){1,10})\s+ # target_node - \d+ # bytes + (\d+|\d+[.]\d+)(kb|b) # bytes \n )+ $/ @@ 
-79,8 +82,8 @@ --- "Test cat recovery output for closed index": - skip: - version: " - 7.1.99" - reason: closed indices are replicated starting version 7.2.0 + version: " - 2.99.99" + reason: Output format changed in 3.0 - do: indices.create: @@ -122,10 +125,10 @@ \d+ \s+ # files_recovered \d+\.\d+% \s+ # files_percent \d+ \s+ # files_total - \d+ \s+ # bytes - \d+ \s+ # bytes_recovered + (\d+|\d+[.]\d+)(kb|b) \s+ # bytes + (\d+|\d+[.]\d+)(kb|b) \s+ # bytes_recovered \d+\.\d+% \s+ # bytes_percent - \d+ \s+ # bytes_total + (\d+|\d+[.]\d+)(kb|b) \s+ # bytes_total 0 \s+ # translog_ops (always 0 for closed indices) 0 \s+ # translog_ops_recovered (always 0 for closed indices) 100\.0% # translog_ops_percent (always 100.0% for closed indices) diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestCatRecoveryAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestCatRecoveryAction.java index 5266095f9d26e..221cef9d71780 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestCatRecoveryAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestCatRecoveryAction.java @@ -42,6 +42,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentOpenSearchExtension; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; @@ -196,10 +197,10 @@ public int compare(RecoveryState o1, RecoveryState o2) { t.addCell(state.getIndex().recoveredFileCount()); t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredFilesPercent())); t.addCell(state.getIndex().totalFileCount()); - t.addCell(state.getIndex().totalRecoverBytes()); - t.addCell(state.getIndex().recoveredBytes()); + t.addCell(new ByteSizeValue(state.getIndex().totalRecoverBytes())); + t.addCell(new ByteSizeValue(state.getIndex().recoveredBytes())); t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); - t.addCell(state.getIndex().totalBytes()); + t.addCell(new ByteSizeValue(state.getIndex().totalBytes())); t.addCell(state.getTranslog().totalOperations()); t.addCell(state.getTranslog().recoveredOperations()); t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getTranslog().recoveredPercent())); diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java index 20fcac1089bc3..3ceb149a7c755 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java @@ -42,6 +42,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentOpenSearchExtension; import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.recovery.RecoveryState; @@ -186,10 +187,10 @@ public void testRestRecoveryAction() { state.getIndex().recoveredFileCount(), percent(state.getIndex().recoveredFilesPercent()), state.getIndex().totalFileCount(), - state.getIndex().totalRecoverBytes(), - state.getIndex().recoveredBytes(), + new ByteSizeValue(state.getIndex().totalRecoverBytes()), + new 
ByteSizeValue(state.getIndex().recoveredBytes()), percent(state.getIndex().recoveredBytesPercent()), - state.getIndex().totalBytes(), + new ByteSizeValue(state.getIndex().totalBytes()), state.getTranslog().totalOperations(), state.getTranslog().recoveredOperations(), percent(state.getTranslog().recoveredPercent()) From 47d3655d8e86e44ab8ea4b58079d09fa0439db8c Mon Sep 17 00:00:00 2001 From: John Mazanec Date: Thu, 20 Mar 2025 09:04:08 -0700 Subject: [PATCH 090/550] Add dfs transformation function in XContentMapValues (#17612) * Add transformation function in XContentMapValues Adds a transformation function for XContentMapValues that performs depth first traversal into a map, potentially applying transformations to different values along the way. Main application for the method will be to provide masks that change values in the map without compromising the structure. Signed-off-by: John Mazanec * Switch to stack based Signed-off-by: John Mazanec * Implement buildTransformerTrie without recursion Signed-off-by: John Mazanec * Add inplace transform Signed-off-by: John Mazanec * Fix changelog Signed-off-by: John Mazanec * Add test for shared path Signed-off-by: John Mazanec --------- Signed-off-by: John Mazanec --- CHANGELOG.md | 1 + .../xcontent/support/XContentMapValues.java | 149 ++++++++++++++++++ .../support/XContentMapValuesTests.java | 125 +++++++++++++++ 3 files changed, 275 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 37eef849fe59b..ace34a4e4dc30 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Change priority for scheduling reroute during timeout([#16445](https://github.com/opensearch-project/OpenSearch/pull/16445)) - Renaming the node role search to warm ([#17573](https://github.com/opensearch-project/OpenSearch/pull/17573)) - Introduce a new search node role to hold search only shards ([#17620](https://github.com/opensearch-project/OpenSearch/pull/17620)) +- Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) ### Dependencies - Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) diff --git a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java index 7240252b51d83..8175c001cdcd5 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java +++ b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java @@ -43,9 +43,11 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Deque; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -60,6 +62,8 @@ */ public class XContentMapValues { + private static final String TRANSFORMER_TRIE_LEAF_KEY = "$transformer"; + /** * Extracts raw values (string, int, and so on) based on the path provided returning all of them * as a single list. @@ -621,4 +625,149 @@ public static String[] nodeStringArrayValue(Object node) { return Strings.splitStringByCommaToArray(node.toString()); } } + + /** + * Performs a depth first traversal of a map and applies a transformation for each field matched along the way. 
For + * duplicated paths with transformers (i.e. "test.nested" and "test.nested.field"), only the transformer for + * the shorter path is applied. + * + * @param source Source map to perform transformation on + * @param transformers Map from path to transformer to apply to each path. Each transformer is a function that takes + * the current value and returns a transformed value + * @param inPlace If true, modify the source map directly; if false, create a copy + * @return Map with transformations applied + */ + public static Map transform( + Map source, + Map> transformers, + boolean inPlace + ) { + return transform(transformers, inPlace).apply(source); + } + + /** + * Returns function that performs a depth first traversal of a map and applies a transformation for each field + * matched along the way. For duplicated paths with transformers (i.e. "test.nested" and "test.nested.field"), only + * the transformer for the shorter path is applied. + * + * @param transformers Map from path to transformer to apply to each path. Each transformer is a function that takes + * the current value and returns a transformed value + * @param inPlace If true, modify the source map directly; if false, create a copy + * @return Function that takes a map and returns a transformed version of the map + */ + public static Function, Map> transform( + Map> transformers, + boolean inPlace + ) { + Map transformerTrie = buildTransformerTrie(transformers); + return source -> { + Deque stack = new ArrayDeque<>(); + Map result = inPlace ? source : new HashMap<>(source); + stack.push(new TransformContext(result, transformerTrie)); + + processStack(stack, inPlace); + return result; + }; + } + + @SuppressWarnings("unchecked") + private static Map buildTransformerTrie(Map> transformers) { + Map trie = new HashMap<>(); + for (Map.Entry> entry : transformers.entrySet()) { + String[] pathElements = entry.getKey().split("\\."); + Map subTrie = trie; + for (String pathElement : pathElements) { + subTrie = (Map) subTrie.computeIfAbsent(pathElement, k -> new HashMap<>()); + } + subTrie.put(TRANSFORMER_TRIE_LEAF_KEY, entry.getValue()); + } + return trie; + } + + private static void processStack(Deque stack, boolean inPlace) { + while (!stack.isEmpty()) { + TransformContext ctx = stack.pop(); + processMap(ctx.map, ctx.trie, stack, inPlace); + } + } + + private static void processMap( + Map currentMap, + Map currentTrie, + Deque stack, + boolean inPlace + ) { + for (Map.Entry entry : currentMap.entrySet()) { + processEntry(entry, currentTrie, stack, inPlace); + } + } + + private static void processEntry( + Map.Entry entry, + Map currentTrie, + Deque stack, + boolean inPlace + ) { + String key = entry.getKey(); + Object value = entry.getValue(); + + Object subTrieObj = currentTrie.get(key); + if (subTrieObj instanceof Map == false) { + return; + } + Map subTrie = nodeMapValue(subTrieObj, "transform"); + + // Apply transformation if available + Function transformer = (Function) subTrie.get(TRANSFORMER_TRIE_LEAF_KEY); + if (transformer != null) { + entry.setValue(transformer.apply(value)); + return; + } + + // Process nested structures + if (value instanceof Map) { + Map subMap = nodeMapValue(value, "transform"); + if (inPlace == false) { + subMap = new HashMap<>(subMap); + entry.setValue(subMap); + } + stack.push(new TransformContext(subMap, subTrie)); + } else if (value instanceof List list) { + List subList = (List) list; + if (inPlace == false) { + subList = new ArrayList<>(list); + entry.setValue(subList); + } + processList(subList, 
subTrie, stack, inPlace); + } + } + + private static void processList( + List list, + Map transformerTrie, + Deque stack, + boolean inPlace + ) { + for (int i = list.size() - 1; i >= 0; i--) { + Object value = list.get(i); + if (value instanceof Map) { + Map subMap = nodeMapValue(value, "transform"); + if (inPlace == false) { + subMap = new HashMap<>(subMap); + list.set(i, subMap); + } + stack.push(new TransformContext(subMap, transformerTrie)); + } + } + } + + private static class TransformContext { + Map map; + Map trie; + + TransformContext(Map map, Map trie) { + this.map = map; + this.trie = trie; + } + } } diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java b/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java index be194c070135a..08ee5d4e8d3a9 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java @@ -32,6 +32,7 @@ package org.opensearch.common.xcontent.support; +import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.collect.Tuple; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; @@ -48,9 +49,12 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Function; +import java.util.stream.IntStream; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; @@ -629,6 +633,127 @@ public void testPrefix() { assertEquals(expected, filtered); } + public void testTransformFlat() { + Map mapToTransform = Map.of( + "test1", + "value_before", + "test2", + List.of("value_before", "value_before", "value_before") + ); + + Map> transformers = Map.of("test1", v -> "value_after", "test2", v -> "value_after"); + + Map expected = Map.of("test1", "value_after", "test2", "value_after"); + + Map transformedMapped = XContentMapValues.transform(mapToTransform, transformers, false); + assertEquals(expected, transformedMapped); + } + + public void testTransformNested() { + Map mapToTransform = MapBuilder.newMapBuilder() + .put("test1", "value_before") + .put("test2", Map.of("nest2", "value_before")) + .put("test3", List.of(Map.of("nest3", "value_before"), Map.of("nest3", "value_before"), Map.of("nest3", "value_before"))) + .put( + "test4", + List.of( + Map.of( + "nest4", + List.of(Map.of("nest5", "value_before"), Map.of("nest5", "value_before"), Map.of("nest5", "value_before")), + "test5", + "no_change" + ), + Map.of( + "nest4", + List.of( + Map.of("nest5", "value_before"), + Map.of("nest5", "value_before"), + Map.of("nest5", "value_before"), + Map.of("nest5", "value_before") + ), + "test5", + "no_change" + ), + Map.of("nest4", List.of(Map.of("nest5", "value_before"), Map.of("nest5", "value_before")), "test5", "no_change") + ) + ) + .put("test6", null) + .immutableMap(); + + Iterator test3Stream = IntStream.rangeClosed(1, 3).mapToObj(i -> "value_after" + i).toList().iterator(); + Iterator test4Stream = IntStream.rangeClosed(1, 9).mapToObj(i -> "value_after" + i).toList().iterator(); + Map> transformers = Map.of( + "test1", + v -> "value_after", + "test2.nest2", + v -> "value_after", + "test3.nest3", + v -> test3Stream.next(), + "test4.nest4.nest5", + v -> test4Stream.next(), + "test6", + v -> v == null ? 
v : "value_after" + ); + + Map expected = MapBuilder.newMapBuilder() + .put("test1", "value_after") + .put("test2", Map.of("nest2", "value_after")) + .put("test3", List.of(Map.of("nest3", "value_after1"), Map.of("nest3", "value_after2"), Map.of("nest3", "value_after3"))) + .put( + "test4", + List.of( + Map.of( + "nest4", + List.of(Map.of("nest5", "value_after1"), Map.of("nest5", "value_after2"), Map.of("nest5", "value_after3")), + "test5", + "no_change" + ), + Map.of( + "nest4", + List.of( + Map.of("nest5", "value_after4"), + Map.of("nest5", "value_after5"), + Map.of("nest5", "value_after6"), + Map.of("nest5", "value_after7") + ), + "test5", + "no_change" + ), + Map.of("nest4", List.of(Map.of("nest5", "value_after8"), Map.of("nest5", "value_after9")), "test5", "no_change") + ) + ) + .put("test6", null) + .immutableMap(); + + Map transformedMapped = XContentMapValues.transform(mapToTransform, transformers, false); + assertEquals(expected, transformedMapped); + } + + public void testTransformInPlace() { + Map mapToTransform = MapBuilder.newMapBuilder().put("test1", "value_before").map(); + Map> transformers = Map.of("test1", v -> "value_after"); + Map expected = MapBuilder.newMapBuilder().put("test1", "value_after").immutableMap(); + + Map transformedMapped = XContentMapValues.transform(mapToTransform, transformers, true); + assertEquals(expected, transformedMapped); + } + + public void testTransformSharedPaths() { + Map mapToTransform = MapBuilder.newMapBuilder() + .put("test", "value_before") + .put("test.nested", "nested_value_before") + .map(); + Map> transformers = Map.of("test", v -> "value_after", "test.nested", v -> "nested_value_after"); + + Map expected = MapBuilder.newMapBuilder() + .put("test", "value_after") + .put("test.nested", "nested_value_before") + .immutableMap(); + + Map transformedMap = XContentMapValues.transform(mapToTransform, transformers, true); + assertEquals(expected, transformedMap); + } + private static Map toMap(Builder test, XContentType xContentType, boolean humanReadable) throws IOException { ToXContentObject toXContent = (builder, params) -> test.apply(builder); return convertToMap(toXContent(toXContent, xContentType, humanReadable), true, xContentType).v2(); From ebd743a50cd7162f1552568c367b60dea077774e Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Thu, 20 Mar 2025 22:45:38 +0530 Subject: [PATCH 091/550] Mute flaky tests - S3RemoteStoreIT (#17642) Signed-off-by: Ashish Singh --- .../java/org/opensearch/repositories/s3/S3RemoteStoreIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RemoteStoreIT.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RemoteStoreIT.java index e899ac685132e..6358e6eb65146 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RemoteStoreIT.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RemoteStoreIT.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; +import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.blobstore.BlobPath; @@ -42,6 +43,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @ThreadLeakScope(ThreadLeakScope.Scope.NONE) +@LuceneTestCase.AwaitsFix(bugUrl = "Flakiness seen for this 
class") public class S3RemoteStoreIT extends RemoteStoreCoreTestCase { @Override From f5db0d7b0bc9c1986af69b5c387dbb32b9f229cf Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 20 Mar 2025 12:46:19 -0700 Subject: [PATCH 092/550] Remove dead benchmarking code (#17644) The multi-release JAR configuration was removed in #16366 but this class was not actually removed. It has been dead code since that commit. Signed-off-by: Andrew Ross --- .../common/round/RoundableSupplier.java | 36 ------------------- 1 file changed, 36 deletions(-) delete mode 100644 benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java diff --git a/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java b/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java deleted file mode 100644 index e81c1b137bd30..0000000000000 --- a/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.round; - -import java.util.function.Supplier; - -public class RoundableSupplier implements Supplier { - private final Supplier delegate; - - RoundableSupplier(String type, long[] values, int size) { - switch (type) { - case "binary": - delegate = () -> new BinarySearcher(values, size); - break; - case "linear": - delegate = () -> new BidirectionalLinearSearcher(values, size); - break; - case "btree": - delegate = () -> new BtreeSearcher(values, size); - break; - default: - throw new IllegalArgumentException("invalid type: " + type); - } - } - - @Override - public Roundable get() { - return delegate.get(); - } -} From 9d4414ba7dc7640e6da29ebbf94727a70a0e27f6 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Fri, 21 Mar 2025 05:27:32 +0800 Subject: [PATCH 093/550] Bump com.google.api:api-common from 1.8.1 to 2.46.1 in /plugins/repository-gcs (#17604) * Bump com.google.api:api-common from 1.8.1 to 2.46.1 in /plugins/repository-gcs Signed-off-by: Binlong Gao * modify changelog Signed-off-by: Binlong Gao * Fix format issue Signed-off-by: Binlong Gao --------- Signed-off-by: Binlong Gao Signed-off-by: gaobinlong --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 2 +- plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 | 1 - plugins/repository-gcs/licenses/api-common-2.46.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/api-common-2.46.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index ace34a4e4dc30..56ff3d11bde8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) ### Dependencies +- Bump `com.google.api:api-common` from 1.8.1 to 2.46.1 ([#17604](https://github.com/opensearch-project/OpenSearch/pull/17604)) - Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) - Bump `org.jruby.joni:joni` from 2.2.3 to 2.2.5 ([#17608](https://github.com/opensearch-project/OpenSearch/pull/17608)) - Bump `dangoslen/dependabot-changelog-helper` from 3 to 4 
([#17498](https://github.com/opensearch-project/OpenSearch/pull/17498)) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 371ad36e6fdbb..4e772118e18d5 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -48,7 +48,7 @@ opensearchplugin { } dependencies { - api 'com.google.api:api-common:1.8.1' + api 'com.google.api:api-common:2.46.1' api 'com.google.api:gax:2.35.0' api 'com.google.api:gax-httpjson:2.42.0' diff --git a/plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 deleted file mode 100644 index 7a1c114c6c0fc..0000000000000 --- a/plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e89befb19b08ad84b262b2f226ab79aefcaa9d7f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/api-common-2.46.1.jar.sha1 b/plugins/repository-gcs/licenses/api-common-2.46.1.jar.sha1 new file mode 100644 index 0000000000000..19b87717499be --- /dev/null +++ b/plugins/repository-gcs/licenses/api-common-2.46.1.jar.sha1 @@ -0,0 +1 @@ +b38a684c734963a72c204aa208dd31018d79bf3a \ No newline at end of file From cdcfcbc2f243ca8725828e6c64c1bd08d2bf7d91 Mon Sep 17 00:00:00 2001 From: Sandesh Kumar Date: Thu, 20 Mar 2025 14:51:17 -0700 Subject: [PATCH 094/550] fix precompute ordering for min & max aggregators (#17505) Signed-off-by: Sandesh Kumar --- .../aggregations/metrics/MaxAggregator.java | 32 ++++++++++--------- .../aggregations/metrics/MinAggregator.java | 30 +++++++++-------- 2 files changed, 33 insertions(+), 29 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java index 8a2c8a6de923f..6f606408fc5f8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java @@ -109,6 +109,23 @@ protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws if (valuesSource == null) { return false; } + + if (pointConverter != null) { + Number segMax = findLeafMaxValue(ctx.reader(), pointField, pointConverter); + if (segMax != null) { + /* + * There is no parent aggregator (see {@link AggregatorBase#getPointReaderOrNull} + * so the ordinal for the bucket is always 0. + */ + assert maxes.size() == 1; + double max = maxes.get(0); + max = Math.max(max, segMax.doubleValue()); + maxes.set(0, max); + // the maximum value has been extracted, we don't need to collect hits on this segment. + return true; + } + } + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (parent != null && subAggregators.length == 0) { @@ -132,21 +149,6 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc throw new CollectionTerminatedException(); } } - if (pointConverter != null) { - Number segMax = findLeafMaxValue(ctx.reader(), pointField, pointConverter); - if (segMax != null) { - /* - * There is no parent aggregator (see {@link AggregatorBase#getPointReaderOrNull} - * so the ordinal for the bucket is always 0. - */ - assert maxes.size() == 1; - double max = maxes.get(0); - max = Math.max(max, segMax.doubleValue()); - maxes.set(0, max); - // the maximum value has been extracted, we don't need to collect hits on this segment. 
- throw new CollectionTerminatedException(); - } - } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java index 84dda7928aa90..22749382216dd 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java @@ -109,6 +109,22 @@ protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws if (valuesSource == null) { return false; } + + if (pointConverter != null) { + Number segMin = findLeafMinValue(ctx.reader(), pointField, pointConverter); + if (segMin != null) { + /* + * There is no parent aggregator (see {@link MinAggregator#getPointReaderOrNull} + * so the ordinal for the bucket is always 0. + */ + double min = mins.get(0); + min = Math.min(min, segMin.doubleValue()); + mins.set(0, min); + // the minimum value has been extracted, we don't need to collect hits on this segment. + return true; + } + } + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (parent != null && subAggregators.length == 0) { @@ -133,20 +149,6 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc throw new CollectionTerminatedException(); } } - if (pointConverter != null) { - Number segMin = findLeafMinValue(ctx.reader(), pointField, pointConverter); - if (segMin != null) { - /* - * There is no parent aggregator (see {@link MinAggregator#getPointReaderOrNull} - * so the ordinal for the bucket is always 0. - */ - double min = mins.get(0); - min = Math.min(min, segMin.doubleValue()); - mins.set(0, min); - // the minimum value has been extracted, we don't need to collect hits on this segment. 
- throw new CollectionTerminatedException(); - } - } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx); From 65595719968da3c82af04d3f7a72ec68a5160077 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 20 Mar 2025 15:27:20 -0700 Subject: [PATCH 095/550] Update dependabot changelog helper for 3.x major version (#17645) Signed-off-by: Andrew Ross --- .github/workflows/dependabot_pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index 5f5f15ca3ef72..ee6a851d231f8 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -58,7 +58,7 @@ jobs: - name: Update the changelog uses: dangoslen/dependabot-changelog-helper@v4 with: - version: 'Unreleased 2.x' + version: 'Unreleased 3.x' - name: Commit the changes uses: stefanzweifel/git-auto-commit-action@v5 From d5e9093758f36a7ff8b346d055d1296b33d42652 Mon Sep 17 00:00:00 2001 From: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Date: Fri, 21 Mar 2025 05:44:59 +0530 Subject: [PATCH 096/550] Resolve integTest bug on deb to check for opensearch:adm (#17641) * Fix ownership issue on deb Signed-off-by: Rajat Gupta * Update Changelog Signed-off-by: Rajat Gupta --------- Signed-off-by: Rajat Gupta Signed-off-by: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Co-authored-by: Rajat Gupta --- CHANGELOG.md | 1 + .../java/org/opensearch/systemdinteg/SystemdIntegTests.java | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56ff3d11bde8c..64ff48c1880aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Change priority for scheduling reroute during timeout([#16445](https://github.com/opensearch-project/OpenSearch/pull/16445)) - Renaming the node role search to warm ([#17573](https://github.com/opensearch-project/OpenSearch/pull/17573)) - Introduce a new search node role to hold search only shards ([#17620](https://github.com/opensearch-project/OpenSearch/pull/17620)) +- Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) ### Dependencies diff --git a/qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java b/qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java index 2beadd9445412..6db6f186dc3a4 100644 --- a/qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java +++ b/qa/systemd-test/src/test/java/org/opensearch/systemdinteg/SystemdIntegTests.java @@ -116,7 +116,8 @@ public void testReadWritePaths() throws IOException, InterruptedException { assertTrue("Path should exist: " + path, checkPathExists(path)); assertTrue("Path should be readable: " + path, checkPathReadable(path)); assertTrue("Path should be writable: " + path, checkPathWritable(path)); - assertEquals("Path should be owned by opensearch:opensearch", "opensearch:opensearch", getPathOwnership(path)); + String ownership = getPathOwnership(path); + assertTrue("Path should be owned by opensearch:opensearch or opensearch:adm", ownership.equals("opensearch:opensearch") || ownership.equals("opensearch:adm")); } } From b24c72b06bfd76d7d1d134a9ec9804965ad74352 
Mon Sep 17 00:00:00 2001 From: Ruirui Zhang Date: Thu, 20 Mar 2025 21:25:47 -0700 Subject: [PATCH 097/550] [Rule-based Auto Tagging] Add rule schema for auto tagging (#17238) * add rule schema for workload management Signed-off-by: Ruirui Zhang * modify rule structure based on comment Signed-off-by: Ruirui Zhang * update based on comments Signed-off-by: Ruirui Zhang * add javadoc for autotagging directory Signed-off-by: Ruirui Zhang * change field name from 'name' to 'description' Signed-off-by: Ruirui Zhang * update the rule schema Signed-off-by: Ruirui Zhang * simplify autotagging registry Signed-off-by: Ruirui Zhang * remove generic type parameter from Rule class Signed-off-by: Ruirui Zhang * modify based on comments Signed-off-by: Ruirui Zhang --- CHANGELOG.md | 1 + .../org/opensearch/autotagging/Attribute.java | 59 ++++ .../autotagging/AutoTaggingRegistry.java | 68 +++++ .../opensearch/autotagging/FeatureType.java | 73 +++++ .../java/org/opensearch/autotagging/Rule.java | 255 ++++++++++++++++++ .../opensearch/autotagging/RuleValidator.java | 170 ++++++++++++ .../opensearch/autotagging/package-info.java | 13 + .../autotagging/AutoTaggingRegistryTests.java | 47 ++++ .../autotagging/FeatureTypeTests.java | 40 +++ .../org/opensearch/autotagging/RuleTests.java | 167 ++++++++++++ .../autotagging/RuleValidatorTests.java | 120 +++++++++ 11 files changed, 1013 insertions(+) create mode 100644 server/src/main/java/org/opensearch/autotagging/Attribute.java create mode 100644 server/src/main/java/org/opensearch/autotagging/AutoTaggingRegistry.java create mode 100644 server/src/main/java/org/opensearch/autotagging/FeatureType.java create mode 100644 server/src/main/java/org/opensearch/autotagging/Rule.java create mode 100644 server/src/main/java/org/opensearch/autotagging/RuleValidator.java create mode 100644 server/src/main/java/org/opensearch/autotagging/package-info.java create mode 100644 server/src/test/java/org/opensearch/autotagging/AutoTaggingRegistryTests.java create mode 100644 server/src/test/java/org/opensearch/autotagging/FeatureTypeTests.java create mode 100644 server/src/test/java/org/opensearch/autotagging/RuleTests.java create mode 100644 server/src/test/java/org/opensearch/autotagging/RuleValidatorTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 64ff48c1880aa..b5bac0ee8a4d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 3.x] ### Added - Change priority for scheduling reroute during timeout([#16445](https://github.com/opensearch-project/OpenSearch/pull/16445)) +- [Rule Based Auto-tagging] Add rule schema for auto tagging ([#17238](https://github.com/opensearch-project/OpenSearch/pull/17238)) - Renaming the node role search to warm ([#17573](https://github.com/opensearch-project/OpenSearch/pull/17573)) - Introduce a new search node role to hold search only shards ([#17620](https://github.com/opensearch-project/OpenSearch/pull/17620)) - Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) diff --git a/server/src/main/java/org/opensearch/autotagging/Attribute.java b/server/src/main/java/org/opensearch/autotagging/Attribute.java new file mode 100644 index 0000000000000..61dfc7e704c20 --- /dev/null +++ b/server/src/main/java/org/opensearch/autotagging/Attribute.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to 
+ * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.autotagging; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Represents an attribute within the auto-tagging feature. Attributes define characteristics that can + * be used for tagging and classification. Implementations must ensure that attributes + * are uniquely identifiable by their name. Attributes should be singletons and managed centrally to + * avoid duplicates. + * + * @opensearch.experimental + */ +public interface Attribute extends Writeable { + String getName(); + + /** + * Ensure that `validateAttribute` is called in the constructor of attribute implementations + * to prevent potential serialization issues. + */ + default void validateAttribute() { + String name = getName(); + if (name == null || name.isEmpty()) { + throw new IllegalArgumentException("Attribute name cannot be null or empty"); + } + } + + @Override + default void writeTo(StreamOutput out) throws IOException { + out.writeString(getName()); + } + + /** + * Retrieves an attribute from the given feature type based on its name. + * Implementations of `FeatureType.getAttributeFromName` must be thread-safe as this method + * may be called concurrently. + * @param in - the {@link StreamInput} from which the attribute name is read + * @param featureType - the FeatureType used to look up the attribute + */ + static Attribute from(StreamInput in, FeatureType featureType) throws IOException { + String attributeName = in.readString(); + Attribute attribute = featureType.getAttributeFromName(attributeName); + if (attribute == null) { + throw new IllegalStateException(attributeName + " is not a valid attribute under feature type " + featureType.getName()); + } + return attribute; + } +} diff --git a/server/src/main/java/org/opensearch/autotagging/AutoTaggingRegistry.java b/server/src/main/java/org/opensearch/autotagging/AutoTaggingRegistry.java new file mode 100644 index 0000000000000..394b89922dd2b --- /dev/null +++ b/server/src/main/java/org/opensearch/autotagging/AutoTaggingRegistry.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.autotagging; + +import org.opensearch.ResourceNotFoundException; + +import java.util.HashMap; +import java.util.Map; + +/** + * Registry for managing auto-tagging attributes and feature types. + * This class provides functionality to register and retrieve {@link Attribute} and {@link FeatureType} instances + * used for auto-tagging. + * + * @opensearch.experimental + */ +public class AutoTaggingRegistry { + /** + * featureTypesRegistryMap should be concurrently readable but not concurrently writable. + * The registration of FeatureType should only be done during boot-up. 
+ */ + public static final Map featureTypesRegistryMap = new HashMap<>(); + public static final int MAX_FEATURE_TYPE_NAME_LENGTH = 30; + + public static void registerFeatureType(FeatureType featureType) { + validateFeatureType(featureType); + String name = featureType.getName(); + if (featureTypesRegistryMap.containsKey(name) && featureTypesRegistryMap.get(name) != featureType) { + throw new IllegalStateException("Feature type " + name + " is already registered. Duplicate feature type is not allowed."); + } + featureTypesRegistryMap.put(name, featureType); + } + + private static void validateFeatureType(FeatureType featureType) { + if (featureType == null) { + throw new IllegalStateException("Feature type can't be null. Unable to register."); + } + String name = featureType.getName(); + if (name == null || name.isEmpty() || name.length() > MAX_FEATURE_TYPE_NAME_LENGTH) { + throw new IllegalStateException( + "Feature type name " + name + " should not be null, empty or have more than " + MAX_FEATURE_TYPE_NAME_LENGTH + "characters" + ); + } + } + + /** + * Retrieves the registered {@link FeatureType} instance based on class name and feature type name. + * This method assumes that FeatureTypes are singletons, meaning that each unique + * (className, featureTypeName) pair corresponds to a single, globally shared instance. + * + * @param featureTypeName The name of the feature type. + */ + public static FeatureType getFeatureType(String featureTypeName) { + FeatureType featureType = featureTypesRegistryMap.get(featureTypeName); + if (featureType == null) { + throw new ResourceNotFoundException( + "Couldn't find a feature type with name: " + featureTypeName + ". Make sure you have registered it." + ); + } + return featureType; + } +} diff --git a/server/src/main/java/org/opensearch/autotagging/FeatureType.java b/server/src/main/java/org/opensearch/autotagging/FeatureType.java new file mode 100644 index 0000000000000..b446f62f6d764 --- /dev/null +++ b/server/src/main/java/org/opensearch/autotagging/FeatureType.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.autotagging; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Map; + +/** + * Represents a feature type within the auto-tagging feature. Feature types define different categories of + * characteristics that can be used for tagging and classification. Implementations of this interface are + * responsible for registering feature types in {@link AutoTaggingRegistry}. Implementations must ensure that + * feature types are uniquely identifiable by their class and name. + * + * Implementers should follow these guidelines: + * Feature types should be singletons and managed centrally to avoid duplicates. + * {@link #registerFeatureType()} must be called during initialization to ensure the feature type is available. + * + * @opensearch.experimental + */ +public interface FeatureType extends Writeable { + int DEFAULT_MAX_ATTRIBUTE_VALUES = 10; + int DEFAULT_MAX_ATTRIBUTE_VALUE_LENGTH = 100; + + String getName(); + + /** + * Returns the registry of allowed attributes for this feature type. + * Implementations must ensure that access to this registry is thread-safe. 
+ */ + Map getAllowedAttributesRegistry(); + + default int getMaxNumberOfValuesPerAttribute() { + return DEFAULT_MAX_ATTRIBUTE_VALUES; + } + + default int getMaxCharLengthPerAttributeValue() { + return DEFAULT_MAX_ATTRIBUTE_VALUE_LENGTH; + } + + void registerFeatureType(); + + default boolean isValidAttribute(Attribute attribute) { + return getAllowedAttributesRegistry().containsValue(attribute); + } + + /** + * Retrieves an attribute by its name from the allowed attributes' registry. + * Implementations must ensure that this method is thread-safe. + * @param name The name of the attribute. + */ + default Attribute getAttributeFromName(String name) { + return getAllowedAttributesRegistry().get(name); + } + + @Override + default void writeTo(StreamOutput out) throws IOException { + out.writeString(getName()); + } + + static FeatureType from(StreamInput in) throws IOException { + return AutoTaggingRegistry.getFeatureType(in.readString()); + } +} diff --git a/server/src/main/java/org/opensearch/autotagging/Rule.java b/server/src/main/java/org/opensearch/autotagging/Rule.java new file mode 100644 index 0000000000000..0f4adb5e462f5 --- /dev/null +++ b/server/src/main/java/org/opensearch/autotagging/Rule.java @@ -0,0 +1,255 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.autotagging; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParseException; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Represents a rule schema used for automatic query tagging in the system. + * This class encapsulates the criteria (defined through attributes) for automatically applying relevant + * tags to queries based on matching attribute patterns. This class provides an in-memory representation + * of a rule. The indexed view may differ in representation. 
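+ * For example, an indexed rule may look like the following (field values are illustrative):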
+ * { + * "_id": "fwehf8302582mglfio349==", + * "description": "Assign Query Group for Index Logs123" + * "index_pattern": ["logs123"], + * "query_group": "dev_query_group_id", + * "updated_at": "01-10-2025T21:23:21.456Z" + * } + * @opensearch.experimental + */ +public class Rule implements Writeable, ToXContentObject { + private final String description; + private final FeatureType featureType; + private final Map> attributeMap; + private final String featureValue; + private final String updatedAt; + private final RuleValidator ruleValidator; + public static final String _ID_STRING = "_id"; + public static final String DESCRIPTION_STRING = "description"; + public static final String UPDATED_AT_STRING = "updated_at"; + + public Rule( + String description, + Map> attributeMap, + FeatureType featureType, + String featureValue, + String updatedAt + ) { + this.description = description; + this.featureType = featureType; + this.attributeMap = attributeMap; + this.featureValue = featureValue; + this.updatedAt = updatedAt; + this.ruleValidator = new RuleValidator(description, attributeMap, featureValue, updatedAt, featureType); + this.ruleValidator.validate(); + } + + public Rule(StreamInput in) throws IOException { + description = in.readString(); + featureType = FeatureType.from(in); + attributeMap = in.readMap(i -> Attribute.from(i, featureType), i -> new HashSet<>(i.readStringList())); + featureValue = in.readString(); + updatedAt = in.readString(); + this.ruleValidator = new RuleValidator(description, attributeMap, featureValue, updatedAt, featureType); + this.ruleValidator.validate(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(description); + featureType.writeTo(out); + out.writeMap(attributeMap, (output, attribute) -> attribute.writeTo(output), StreamOutput::writeStringCollection); + out.writeString(featureValue); + out.writeString(updatedAt); + } + + public static Rule fromXContent(final XContentParser parser, FeatureType featureType) throws IOException { + return Builder.fromXContent(parser, featureType).build(); + } + + public String getDescription() { + return description; + } + + public String getFeatureValue() { + return featureValue; + } + + public String getUpdatedAt() { + return updatedAt; + } + + public FeatureType getFeatureType() { + return featureType; + } + + public Map> getAttributeMap() { + return attributeMap; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + String id = params.param(_ID_STRING); + if (id != null) { + builder.field(_ID_STRING, id); + } + builder.field(DESCRIPTION_STRING, description); + for (Map.Entry> entry : attributeMap.entrySet()) { + builder.array(entry.getKey().getName(), entry.getValue().toArray(new String[0])); + } + builder.field(featureType.getName(), featureValue); + builder.field(UPDATED_AT_STRING, updatedAt); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Rule that = (Rule) o; + return Objects.equals(description, that.description) + && Objects.equals(featureValue, that.featureValue) + && Objects.equals(featureType, that.featureType) + && Objects.equals(attributeMap, that.attributeMap) + && Objects.equals(ruleValidator, that.ruleValidator) + && Objects.equals(updatedAt, that.updatedAt); + } + + @Override + public int hashCode() { + return 
Objects.hash(description, featureValue, featureType, attributeMap, updatedAt); + } + + /** + * builder method for the {@link Rule} + * @return Builder object + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder class for {@link Rule} + * @opensearch.experimental + */ + public static class Builder { + private String description; + private Map> attributeMap; + private FeatureType featureType; + private String featureValue; + private String updatedAt; + + private Builder() {} + + public static Builder fromXContent(XContentParser parser, FeatureType featureType) throws IOException { + if (parser.currentToken() == null) { + parser.nextToken(); + } + Builder builder = builder(); + XContentParser.Token token = parser.currentToken(); + + if (token != XContentParser.Token.START_OBJECT) { + throw new XContentParseException("Expected START_OBJECT token but found [" + parser.currentName() + "]"); + } + Map> attributeMap1 = new HashMap<>(); + String fieldName = ""; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token.isValue()) { + if (fieldName.equals(DESCRIPTION_STRING)) { + builder.description(parser.text()); + } else if (fieldName.equals(UPDATED_AT_STRING)) { + builder.updatedAt(parser.text()); + } else if (fieldName.equals(featureType.getName())) { + builder.featureType(featureType); + builder.featureValue(parser.text()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + fromXContentParseArray(parser, fieldName, featureType, attributeMap1); + } + } + return builder.attributeMap(attributeMap1); + } + + public static void fromXContentParseArray( + XContentParser parser, + String fieldName, + FeatureType featureType, + Map> attributeMap + ) throws IOException { + Attribute attribute = featureType.getAttributeFromName(fieldName); + if (attribute == null) { + throw new XContentParseException(fieldName + " is not a valid attribute within the " + featureType.getName() + " feature."); + } + Set attributeValueSet = new HashSet<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + attributeValueSet.add(parser.text()); + } else { + throw new XContentParseException("Unexpected token in array: " + parser.currentToken()); + } + } + attributeMap.put(attribute, attributeValueSet); + } + + public Builder description(String description) { + this.description = description; + return this; + } + + public Builder featureValue(String featureValue) { + this.featureValue = featureValue; + return this; + } + + public Builder attributeMap(Map> attributeMap) { + this.attributeMap = attributeMap; + return this; + } + + public Builder featureType(FeatureType featureType) { + this.featureType = featureType; + return this; + } + + public Builder updatedAt(String updatedAt) { + this.updatedAt = updatedAt; + return this; + } + + public Rule build() { + return new Rule(description, attributeMap, featureType, featureValue, updatedAt); + } + + public String getFeatureValue() { + return featureValue; + } + + public Map> getAttributeMap() { + return attributeMap; + } + } +} diff --git a/server/src/main/java/org/opensearch/autotagging/RuleValidator.java b/server/src/main/java/org/opensearch/autotagging/RuleValidator.java new file mode 100644 index 0000000000000..625d7ba94d282 --- /dev/null +++ b/server/src/main/java/org/opensearch/autotagging/RuleValidator.java @@ -0,0 +1,170 
@@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.autotagging; + +import org.opensearch.common.ValidationException; +import org.joda.time.Instant; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +import static org.opensearch.cluster.metadata.QueryGroup.isValid; + +/** + * This is the validator for rule. It ensures that the rule has a valid description, feature value, + * update time, attribute map, and the rule adheres to the feature type's constraints. + * + * @opensearch.experimental + */ +public class RuleValidator { + private final String description; + private final Map> attributeMap; + private final String featureValue; + private final String updatedAt; + private final FeatureType featureType; + public static final int MAX_DESCRIPTION_LENGTH = 256; + + public RuleValidator( + String description, + Map> attributeMap, + String featureValue, + String updatedAt, + FeatureType featureType + ) { + this.description = description; + this.attributeMap = attributeMap; + this.featureValue = featureValue; + this.updatedAt = updatedAt; + this.featureType = featureType; + } + + public void validate() { + List errorMessages = new ArrayList<>(); + errorMessages.addAll(validateStringFields()); + errorMessages.addAll(validateFeatureType()); + errorMessages.addAll(validateUpdatedAtEpoch()); + errorMessages.addAll(validateAttributeMap()); + if (!errorMessages.isEmpty()) { + ValidationException validationException = new ValidationException(); + validationException.addValidationErrors(errorMessages); + throw new IllegalArgumentException(validationException); + } + } + + private List validateStringFields() { + List errors = new ArrayList<>(); + if (isNullOrEmpty(description)) { + errors.add("Rule description can't be null or empty"); + } else if (description.length() > MAX_DESCRIPTION_LENGTH) { + errors.add("Rule description cannot exceed " + MAX_DESCRIPTION_LENGTH + " characters."); + } + if (isNullOrEmpty(featureValue)) { + errors.add("Rule featureValue can't be null or empty"); + } + if (isNullOrEmpty(updatedAt)) { + errors.add("Rule update time can't be null or empty"); + } + return errors; + } + + private boolean isNullOrEmpty(String str) { + return str == null || str.isEmpty(); + } + + private List validateFeatureType() { + if (featureType == null) { + return List.of("Couldn't identify which feature the rule belongs to. 
Rule feature can't be null."); + } + return new ArrayList<>(); + } + + private List validateUpdatedAtEpoch() { + if (updatedAt != null && !isValid(Instant.parse(updatedAt).getMillis())) { + return List.of("Rule update time is not a valid epoch"); + } + return new ArrayList<>(); + } + + private List validateAttributeMap() { + List errors = new ArrayList<>(); + if (attributeMap == null || attributeMap.isEmpty()) { + errors.add("Rule should have at least 1 attribute requirement"); + } + + if (attributeMap != null && featureType != null) { + for (Map.Entry> entry : attributeMap.entrySet()) { + Attribute attribute = entry.getKey(); + Set attributeValues = entry.getValue(); + errors.addAll(validateAttributeExistence(attribute)); + errors.addAll(validateMaxAttributeValues(attribute, attributeValues)); + errors.addAll(validateAttributeValuesLength(attributeValues)); + } + } + return errors; + } + + private List validateAttributeExistence(Attribute attribute) { + if (featureType.getAttributeFromName(attribute.getName()) == null) { + return List.of(attribute.getName() + " is not a valid attribute within the " + featureType.getName() + " feature."); + } + return new ArrayList<>(); + } + + private List validateMaxAttributeValues(Attribute attribute, Set attributeValues) { + List errors = new ArrayList<>(); + String attributeName = attribute.getName(); + if (attributeValues.isEmpty()) { + errors.add("Attribute values for " + attributeName + " cannot be empty."); + } + int maxSize = featureType.getMaxNumberOfValuesPerAttribute(); + int actualSize = attributeValues.size(); + if (actualSize > maxSize) { + errors.add( + "Each attribute can only have a maximum of " + + maxSize + + " values. The input attribute " + + attributeName + + " has length " + + attributeValues.size() + + ", which exceeds this limit." + ); + } + return errors; + } + + private List validateAttributeValuesLength(Set attributeValues) { + int maxValueLength = featureType.getMaxCharLengthPerAttributeValue(); + for (String attributeValue : attributeValues) { + if (attributeValue.isEmpty() || attributeValue.length() > maxValueLength) { + return List.of("Attribute value [" + attributeValue + "] is invalid (empty or exceeds " + maxValueLength + " characters)"); + } + } + return new ArrayList<>(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RuleValidator that = (RuleValidator) o; + return Objects.equals(description, that.description) + && Objects.equals(attributeMap, that.attributeMap) + && Objects.equals(featureValue, that.featureValue) + && Objects.equals(updatedAt, that.updatedAt) + && Objects.equals(featureType, that.featureType); + } + + @Override + public int hashCode() { + return Objects.hash(description, attributeMap, featureValue, updatedAt, featureType); + } +} diff --git a/server/src/main/java/org/opensearch/autotagging/package-info.java b/server/src/main/java/org/opensearch/autotagging/package-info.java new file mode 100644 index 0000000000000..1c0794c18241b --- /dev/null +++ b/server/src/main/java/org/opensearch/autotagging/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * This package contains auto tagging constructs + */ + +package org.opensearch.autotagging; diff --git a/server/src/test/java/org/opensearch/autotagging/AutoTaggingRegistryTests.java b/server/src/test/java/org/opensearch/autotagging/AutoTaggingRegistryTests.java new file mode 100644 index 0000000000000..8bd240dad99e6 --- /dev/null +++ b/server/src/test/java/org/opensearch/autotagging/AutoTaggingRegistryTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.autotagging; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.BeforeClass; + +import static org.opensearch.autotagging.AutoTaggingRegistry.MAX_FEATURE_TYPE_NAME_LENGTH; +import static org.opensearch.autotagging.RuleTests.INVALID_FEATURE; +import static org.opensearch.autotagging.RuleTests.TEST_FEATURE_TYPE; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class AutoTaggingRegistryTests extends OpenSearchTestCase { + + @BeforeClass + public static void setUpOnce() { + FeatureType featureType = mock(FeatureType.class); + when(featureType.getName()).thenReturn(TEST_FEATURE_TYPE); + AutoTaggingRegistry.registerFeatureType(featureType); + } + + public void testGetFeatureType_Success() { + FeatureType retrievedFeatureType = AutoTaggingRegistry.getFeatureType(TEST_FEATURE_TYPE); + assertEquals(TEST_FEATURE_TYPE, retrievedFeatureType.getName()); + } + + public void testRuntimeException() { + assertThrows(ResourceNotFoundException.class, () -> AutoTaggingRegistry.getFeatureType(INVALID_FEATURE)); + } + + public void testIllegalStateExceptionException() { + assertThrows(IllegalStateException.class, () -> AutoTaggingRegistry.registerFeatureType(null)); + FeatureType featureType = mock(FeatureType.class); + when(featureType.getName()).thenReturn(TEST_FEATURE_TYPE); + assertThrows(IllegalStateException.class, () -> AutoTaggingRegistry.registerFeatureType(featureType)); + when(featureType.getName()).thenReturn(randomAlphaOfLength(MAX_FEATURE_TYPE_NAME_LENGTH + 1)); + assertThrows(IllegalStateException.class, () -> AutoTaggingRegistry.registerFeatureType(featureType)); + } +} diff --git a/server/src/test/java/org/opensearch/autotagging/FeatureTypeTests.java b/server/src/test/java/org/opensearch/autotagging/FeatureTypeTests.java new file mode 100644 index 0000000000000..e8cf46818c515 --- /dev/null +++ b/server/src/test/java/org/opensearch/autotagging/FeatureTypeTests.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.autotagging; + +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +import static org.opensearch.autotagging.RuleTests.FEATURE_TYPE; +import static org.opensearch.autotagging.RuleTests.INVALID_ATTRIBUTE; +import static org.opensearch.autotagging.RuleTests.TEST_ATTR1_NAME; +import static org.opensearch.autotagging.RuleTests.TestAttribute.TEST_ATTRIBUTE_1; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class FeatureTypeTests extends OpenSearchTestCase { + public void testIsValidAttribute() { + assertTrue(FEATURE_TYPE.isValidAttribute(TEST_ATTRIBUTE_1)); + assertFalse(FEATURE_TYPE.isValidAttribute(mock(Attribute.class))); + } + + public void testGetAttributeFromName() { + assertEquals(TEST_ATTRIBUTE_1, FEATURE_TYPE.getAttributeFromName(TEST_ATTR1_NAME)); + assertNull(FEATURE_TYPE.getAttributeFromName(INVALID_ATTRIBUTE)); + } + + public void testWriteTo() throws IOException { + StreamOutput mockOutput = mock(StreamOutput.class); + FEATURE_TYPE.writeTo(mockOutput); + verify(mockOutput).writeString(anyString()); + } +} diff --git a/server/src/test/java/org/opensearch/autotagging/RuleTests.java b/server/src/test/java/org/opensearch/autotagging/RuleTests.java new file mode 100644 index 0000000000000..5f27942639126 --- /dev/null +++ b/server/src/test/java/org/opensearch/autotagging/RuleTests.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.autotagging; + +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.time.Instant; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.autotagging.Rule._ID_STRING; +import static org.opensearch.autotagging.RuleTests.TestAttribute.TEST_ATTRIBUTE_1; +import static org.opensearch.autotagging.RuleTests.TestAttribute.TEST_ATTRIBUTE_2; + +public class RuleTests extends AbstractSerializingTestCase { + public static final String TEST_ATTR1_NAME = "test_attr1"; + public static final String TEST_ATTR2_NAME = "test_attr2"; + public static final String TEST_FEATURE_TYPE = "test_feature_type"; + public static final String DESCRIPTION = "description"; + public static final String _ID = "test_id"; + public static final String FEATURE_VALUE = "feature_value"; + public static final TestFeatureType FEATURE_TYPE = TestFeatureType.INSTANCE; + public static final Map> ATTRIBUTE_MAP = Map.of( + TEST_ATTRIBUTE_1, + Set.of("value1"), + TEST_ATTRIBUTE_2, + Set.of("value2") + ); + public static final String UPDATED_AT = "2025-02-24T07:42:10.123456Z"; + public static final String INVALID_CLASS = "invalid_class"; + public static final String INVALID_ATTRIBUTE = "invalid_attribute"; + public static final String INVALID_FEATURE = "invalid_feature"; + + @Override + protected Rule createTestInstance() { + String description = randomAlphaOfLength(10); + String featureValue = randomAlphaOfLength(5); + String updatedAt = Instant.now().toString(); 
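+ // RuleValidator parses updated_at as an ISO-8601 instant, so Instant.now().toString() is a valid value here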
+ return new Rule(description, ATTRIBUTE_MAP, FEATURE_TYPE, featureValue, updatedAt); + } + + @Override + protected Writeable.Reader instanceReader() { + return Rule::new; + } + + @Override + protected Rule doParseInstance(XContentParser parser) throws IOException { + return Rule.fromXContent(parser, FEATURE_TYPE); + } + + public enum TestAttribute implements Attribute { + TEST_ATTRIBUTE_1(TEST_ATTR1_NAME), + TEST_ATTRIBUTE_2(TEST_ATTR2_NAME); + + private final String name; + + TestAttribute(String name) { + this.name = name; + } + + @Override + public String getName() { + return name; + } + } + + public static class TestFeatureType implements FeatureType { + public static final TestFeatureType INSTANCE = new TestFeatureType(); + private static final String NAME = TEST_FEATURE_TYPE; + private static final int MAX_ATTRIBUTE_VALUES = 10; + private static final int MAX_ATTRIBUTE_VALUE_LENGTH = 100; + private static final Map ALLOWED_ATTRIBUTES = Map.of( + TEST_ATTR1_NAME, + TEST_ATTRIBUTE_1, + TEST_ATTR2_NAME, + TEST_ATTRIBUTE_2 + ); + + public TestFeatureType() {} + + static { + INSTANCE.registerFeatureType(); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public int getMaxNumberOfValuesPerAttribute() { + return MAX_ATTRIBUTE_VALUES; + } + + @Override + public int getMaxCharLengthPerAttributeValue() { + return MAX_ATTRIBUTE_VALUE_LENGTH; + } + + @Override + public Map getAllowedAttributesRegistry() { + return ALLOWED_ATTRIBUTES; + } + + @Override + public void registerFeatureType() { + AutoTaggingRegistry.registerFeatureType(INSTANCE); + } + } + + static Rule buildRule( + String featureValue, + FeatureType featureType, + Map> attributeListMap, + String updatedAt, + String description + ) { + return Rule.builder() + .featureValue(featureValue) + .featureType(featureType) + .description(description) + .attributeMap(attributeListMap) + .updatedAt(updatedAt) + .build(); + } + + public void testValidRule() { + Rule rule = buildRule(FEATURE_VALUE, FEATURE_TYPE, ATTRIBUTE_MAP, UPDATED_AT, DESCRIPTION); + assertNotNull(rule.getFeatureValue()); + assertEquals(FEATURE_VALUE, rule.getFeatureValue()); + assertNotNull(rule.getUpdatedAt()); + assertEquals(UPDATED_AT, rule.getUpdatedAt()); + Map> resultMap = rule.getAttributeMap(); + assertNotNull(resultMap); + assertFalse(resultMap.isEmpty()); + assertNotNull(rule.getFeatureType()); + } + + public void testToXContent() throws IOException { + String updatedAt = Instant.now().toString(); + Rule rule = buildRule(FEATURE_VALUE, FEATURE_TYPE, Map.of(TEST_ATTRIBUTE_1, Set.of("value1")), updatedAt, DESCRIPTION); + + XContentBuilder builder = JsonXContent.contentBuilder(); + rule.toXContent(builder, new ToXContent.MapParams(Map.of(_ID_STRING, _ID))); + assertEquals( + "{\"_id\":\"" + + _ID + + "\",\"description\":\"description\",\"test_attr1\":[\"value1\"],\"test_feature_type\":\"feature_value\",\"updated_at\":\"" + + updatedAt + + "\"}", + builder.toString() + ); + } +} diff --git a/server/src/test/java/org/opensearch/autotagging/RuleValidatorTests.java b/server/src/test/java/org/opensearch/autotagging/RuleValidatorTests.java new file mode 100644 index 0000000000000..8fbf16cd34c52 --- /dev/null +++ b/server/src/test/java/org/opensearch/autotagging/RuleValidatorTests.java @@ -0,0 +1,120 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.autotagging; + +import org.opensearch.test.OpenSearchTestCase; + +import java.time.Instant; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.autotagging.RuleTests.ATTRIBUTE_MAP; +import static org.opensearch.autotagging.RuleTests.DESCRIPTION; +import static org.opensearch.autotagging.RuleTests.FEATURE_TYPE; +import static org.opensearch.autotagging.RuleTests.FEATURE_VALUE; +import static org.opensearch.autotagging.RuleTests.TestAttribute.TEST_ATTRIBUTE_1; +import static org.opensearch.autotagging.RuleTests.UPDATED_AT; + +public class RuleValidatorTests extends OpenSearchTestCase { + + public void testValidRule() { + RuleValidator validator = new RuleValidator(DESCRIPTION, ATTRIBUTE_MAP, FEATURE_VALUE, UPDATED_AT, FEATURE_TYPE); + try { + validator.validate(); + } catch (Exception e) { + fail("Expected no exception to be thrown, but got: " + e.getClass().getSimpleName()); + } + } + + public static void validateRule( + String featureValue, + T featureType, + Map> attributeMap, + String updatedAt, + String description + ) { + RuleValidator validator = new RuleValidator(description, attributeMap, featureValue, updatedAt, featureType); + validator.validate(); + } + + public void testInvalidDescription() { + assertThrows(IllegalArgumentException.class, () -> validateRule(FEATURE_VALUE, FEATURE_TYPE, ATTRIBUTE_MAP, UPDATED_AT, "")); + assertThrows(IllegalArgumentException.class, () -> validateRule(FEATURE_VALUE, FEATURE_TYPE, ATTRIBUTE_MAP, UPDATED_AT, null)); + assertThrows( + IllegalArgumentException.class, + () -> validateRule( + FEATURE_VALUE, + FEATURE_TYPE, + ATTRIBUTE_MAP, + UPDATED_AT, + randomAlphaOfLength(RuleValidator.MAX_DESCRIPTION_LENGTH + 1) + ) + ); + } + + public void testInvalidUpdateTime() { + assertThrows(IllegalArgumentException.class, () -> validateRule(FEATURE_VALUE, FEATURE_TYPE, ATTRIBUTE_MAP, null, DESCRIPTION)); + } + + public void testNullOrEmptyAttributeMap() { + assertThrows( + IllegalArgumentException.class, + () -> validateRule(FEATURE_VALUE, FEATURE_TYPE, new HashMap<>(), Instant.now().toString(), DESCRIPTION) + ); + assertThrows( + IllegalArgumentException.class, + () -> validateRule(FEATURE_VALUE, FEATURE_TYPE, null, Instant.now().toString(), DESCRIPTION) + ); + } + + public void testInvalidAttributeMap() { + Map> map = new HashMap<>(); + Attribute attribute = TEST_ATTRIBUTE_1; + map.put(attribute, Set.of("")); + assertThrows( + IllegalArgumentException.class, + () -> validateRule(FEATURE_VALUE, FEATURE_TYPE, map, Instant.now().toString(), DESCRIPTION) + ); + + map.put(attribute, Set.of(randomAlphaOfLength(FEATURE_TYPE.getMaxCharLengthPerAttributeValue() + 1))); + assertThrows( + IllegalArgumentException.class, + () -> validateRule(FEATURE_VALUE, FEATURE_TYPE, map, Instant.now().toString(), DESCRIPTION) + ); + + map.put(attribute, new HashSet<>()); + for (int i = 0; i < FEATURE_TYPE.getMaxNumberOfValuesPerAttribute() + 1; i++) { + map.get(attribute).add(String.valueOf(i)); + } + assertThrows( + IllegalArgumentException.class, + () -> validateRule(FEATURE_VALUE, FEATURE_TYPE, map, Instant.now().toString(), DESCRIPTION) + ); + } + + public void testInvalidFeature() { + assertThrows( + IllegalArgumentException.class, + () -> validateRule(FEATURE_VALUE, null, new HashMap<>(), Instant.now().toString(), DESCRIPTION) + ); + } + + public void testInvalidLabel() { + assertThrows(IllegalArgumentException.class, () -> validateRule(null, FEATURE_TYPE, 
ATTRIBUTE_MAP, UPDATED_AT, DESCRIPTION)); + assertThrows(IllegalArgumentException.class, () -> validateRule("", FEATURE_TYPE, ATTRIBUTE_MAP, UPDATED_AT, DESCRIPTION)); + } + + public void testEqualRuleValidators() { + RuleValidator validator = new RuleValidator(DESCRIPTION, ATTRIBUTE_MAP, FEATURE_VALUE, UPDATED_AT, FEATURE_TYPE); + RuleValidator otherValidator = new RuleValidator(DESCRIPTION, ATTRIBUTE_MAP, FEATURE_VALUE, UPDATED_AT, FEATURE_TYPE); + assertEquals(validator, otherValidator); + } +} From 77a4c22e9b908162cb968e510a230227ca6de677 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 23:44:54 -0500 Subject: [PATCH 098/550] Bump com.nimbusds:nimbus-jose-jwt in /plugins/repository-azure (#17607) Bumps [com.nimbusds:nimbus-jose-jwt](https://bitbucket.org/connect2id/nimbus-jose-jwt) from 9.41.1 to 10.0.2. - [Changelog](https://bitbucket.org/connect2id/nimbus-jose-jwt/src/master/CHANGELOG.txt) - [Commits](https://bitbucket.org/connect2id/nimbus-jose-jwt/branches/compare/10.0.2..9.41.1) --- updated-dependencies: - dependency-name: com.nimbusds:nimbus-jose-jwt dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: Andrew Ross Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- .../repository-azure/licenses/nimbus-jose-jwt-10.0.2.jar.sha1 | 1 + .../repository-azure/licenses/nimbus-jose-jwt-9.41.1.jar.sha1 | 1 - 4 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 plugins/repository-azure/licenses/nimbus-jose-jwt-10.0.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/nimbus-jose-jwt-9.41.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index b5bac0ee8a4d1..d27c3ea8fbef2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) ### Dependencies +- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607)) - Bump `com.google.api:api-common` from 1.8.1 to 2.46.1 ([#17604](https://github.com/opensearch-project/OpenSearch/pull/17604)) - Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) - Bump `org.jruby.joni:joni` from 2.2.3 to 2.2.5 ([#17608](https://github.com/opensearch-project/OpenSearch/pull/17608)) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 995ff49a355cf..c2fc2233c0473 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -63,7 +63,7 @@ dependencies { api "net.java.dev.jna:jna-platform:${versions.jna}" api 'com.microsoft.azure:msal4j:1.18.0' api 'com.nimbusds:oauth2-oidc-sdk:11.21' - api 'com.nimbusds:nimbus-jose-jwt:9.41.1' + api 'com.nimbusds:nimbus-jose-jwt:10.0.2' api 'com.nimbusds:content-type:2.3' api 'com.nimbusds:lang-tag:1.7' // Both msal4j:1.14.3 and oauth2-oidc-sdk:11.9.1 has compile dependency on different versions of json-smart, diff --git a/plugins/repository-azure/licenses/nimbus-jose-jwt-10.0.2.jar.sha1 b/plugins/repository-azure/licenses/nimbus-jose-jwt-10.0.2.jar.sha1 new file mode 100644 index 0000000000000..f5cb7b21f17aa --- /dev/null +++ 
b/plugins/repository-azure/licenses/nimbus-jose-jwt-10.0.2.jar.sha1 @@ -0,0 +1 @@ +93347ea9247ae09e095575e10f9cae79c195fbb8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/nimbus-jose-jwt-9.41.1.jar.sha1 b/plugins/repository-azure/licenses/nimbus-jose-jwt-9.41.1.jar.sha1 deleted file mode 100644 index 71fa950cb9530..0000000000000 --- a/plugins/repository-azure/licenses/nimbus-jose-jwt-9.41.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -35532a88e1b49a623ec97fd276cc208ea525b6bc \ No newline at end of file From 9b9abf78acc98c44777b58ba64baf811fecebf7f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Mar 2025 13:53:36 +0800 Subject: [PATCH 099/550] Bump com.google.api:gax from 2.35.0 to 2.63.1 in /plugins/repository-gcs (#17465) --- updated-dependencies: - dependency-name: com.google.api:gax dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: Andrew Ross Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 25 ++++++++++++++++--- .../licenses/gax-2.35.0.jar.sha1 | 1 - .../licenses/gax-2.63.1.jar.sha1 | 1 + 4 files changed, 23 insertions(+), 5 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gax-2.63.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index d27c3ea8fbef2..7368b7486d7b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) - Bump `org.jruby.joni:joni` from 2.2.3 to 2.2.5 ([#17608](https://github.com/opensearch-project/OpenSearch/pull/17608)) - Bump `dangoslen/dependabot-changelog-helper` from 3 to 4 ([#17498](https://github.com/opensearch-project/OpenSearch/pull/17498)) +- Bump `com.google.api:gax` from 2.35.0 to 2.63.1 ([#17465](https://github.com/opensearch-project/OpenSearch/pull/17465)) ### Changed diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 4e772118e18d5..515ad4c3bc0ed 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -49,7 +49,7 @@ opensearchplugin { dependencies { api 'com.google.api:api-common:2.46.1' - api 'com.google.api:gax:2.35.0' + api 'com.google.api:gax:2.63.1' api 'com.google.api:gax-httpjson:2.42.0' api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0' @@ -202,9 +202,26 @@ thirdPartyAudit { 'javax.jms.Message', 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', - // Bump for gax 2.42.0 - 'com.google.api.gax.rpc.EndpointContext', - 'com.google.api.gax.rpc.RequestMutator' + + // opentelemetry-api is an optional dependency of com.google.api:gax + 'io.opentelemetry.api.OpenTelemetry', + 'io.opentelemetry.api.common.Attributes', + 'io.opentelemetry.api.common.AttributesBuilder', + 'io.opentelemetry.api.metrics.DoubleHistogram', + 'io.opentelemetry.api.metrics.DoubleHistogramBuilder', + 'io.opentelemetry.api.metrics.LongCounter', + 'io.opentelemetry.api.metrics.LongCounterBuilder', + 'io.opentelemetry.api.metrics.Meter', + 'io.opentelemetry.api.metrics.MeterBuilder', + + // slf4j is an optional dependency of com.google.api:gax + 'org.slf4j.ILoggerFactory', + 'org.slf4j.Logger', + 
'org.slf4j.LoggerFactory', + 'org.slf4j.MDC', + 'org.slf4j.event.Level', + 'org.slf4j.helpers.NOPLogger', + 'org.slf4j.spi.LoggingEventBuilder' ) } diff --git a/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 deleted file mode 100644 index 778922c637dc1..0000000000000 --- a/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98d52034cfa6d1b881e16f418894afcfacd89b7a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-2.63.1.jar.sha1 b/plugins/repository-gcs/licenses/gax-2.63.1.jar.sha1 new file mode 100644 index 0000000000000..d438c0b04fcb9 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-2.63.1.jar.sha1 @@ -0,0 +1 @@ +6c9a340608a63e24dc8acd8da84afd8ffecca4b7 \ No newline at end of file From 08dc95ee1bd7af3f02dc9e11892bcd624208257f Mon Sep 17 00:00:00 2001 From: Iwan Igonin <83668556+beanuwave@users.noreply.github.com> Date: Fri, 21 Mar 2025 16:26:27 +0100 Subject: [PATCH 100/550] Migrate from BC to BCFIPS libraries (#17507) * Migrate from BC to BCFIPS libraries Signed-off-by: Igonin Co-authored-by: Benny Goerzig Co-authored-by: Karsten Schnitter Co-authored-by: Kai Sternad # Conflicts: # release-notes/opensearch.release-notes-3.0.0-alpha1.md * reduce footprint of BC libs Signed-off-by: Igonin Co-authored-by: Benny Goerzig Co-authored-by: Karsten Schnitter Co-authored-by: Kai Sternad * restrain permissions in server policy file Signed-off-by: Igonin Co-authored-by: Benny Goerzig Co-authored-by: Karsten Schnitter Co-authored-by: Kai Sternad --------- Signed-off-by: Igonin Co-authored-by: Igonin Co-authored-by: Igonin Co-authored-by: Benny Goerzig Co-authored-by: Karsten Schnitter Co-authored-by: Kai Sternad --- CHANGELOG.md | 3 + .../gradle/OpenSearchTestBasePlugin.java | 6 + client/rest/build.gradle | 7 + client/rest/licenses/bc-fips-2.0.0.jar.sha1 | 1 + .../rest/licenses/bctls-fips-2.0.19.jar.sha1 | 1 + .../rest/licenses/bcutil-fips-2.0.3.jar.sha1 | 1 + client/rest/licenses/bouncycastle-LICENSE.txt | 14 ++ client/rest/licenses/bouncycastle-NOTICE.txt | 1 + .../client/RestClientBuilderIntegTests.java | 67 +++--- distribution/src/config/java.security | 10 + .../keystore/AddFileKeyStoreCommandTests.java | 18 +- .../AddStringKeyStoreCommandTests.java | 19 +- .../ChangeKeyStorePasswordCommandTests.java | 18 +- .../cli/keystore/KeyStoreWrapperTests.java | 158 +++++++------- .../keystore/ListKeyStoreCommandTests.java | 18 +- .../RemoveSettingKeyStoreCommandTests.java | 19 +- distribution/tools/plugin-cli/build.gradle | 8 +- .../licenses/bcpg-fips-2.0.10.jar.sha1 | 1 + .../licenses/bcpg-fips-2.0.9.jar.sha1 | 1 - .../cli/plugin/InstallPluginCommand.java | 3 +- .../cli/plugin/InstallPluginCommandTests.java | 4 +- gradle/libs.versions.toml | 7 +- libs/ssl-config/build.gradle | 7 +- .../licenses/bc-fips-2.0.0.jar.sha1 | 1 + .../licenses/bcpkix-fips-2.0.7.jar.sha1 | 1 + .../licenses/bcpkix-jdk18on-1.78.jar.sha1 | 1 - .../licenses/bcprov-jdk18on-1.78.jar.sha1 | 1 - .../licenses/bctls-fips-2.0.19.jar.sha1 | 1 + .../licenses/bcutil-fips-2.0.3.jar.sha1 | 1 + .../licenses/bcutil-jdk18on-1.78.jar.sha1 | 1 - .../org/opensearch/common/ssl/PemUtils.java | 9 +- .../ssl/DefaultJdkTrustConfigTests.java | 2 +- .../common/ssl/PemTrustConfigTests.java | 13 +- .../common/ssl/SslDiagnosticsTests.java | 10 +- .../reindex/ReindexRestClientSslTests.java | 12 +- plugins/identity-shiro/build.gradle | 2 +- .../licenses/bcprov-jdk18on-1.78.jar.sha1 | 1 - .../licenses/bcprov-jdk18on-LICENSE.txt | 22 -- 
.../licenses/password4j-1.8.2.jar.sha1 | 1 + .../licenses/password4j-LICENSE.txt | 201 ++++++++++++++++++ ...k18on-NOTICE.txt => password4j-NOTICE.txt} | 0 .../shiro/realm/BCryptPasswordMatcher.java | 31 ++- .../realm/BCryptPasswordMatcherTests.java | 29 +++ qa/smoke-test-plugins/build.gradle | 4 - .../common/settings/KeyStoreWrapper.java | 4 +- .../org/opensearch/bootstrap/security.policy | 7 + .../org/opensearch/bootstrap/test.policy | 4 +- test/framework/build.gradle | 6 +- .../framework/licenses/bc-fips-2.0.0.jar.sha1 | 1 + .../licenses/bcpkix-fips-2.0.7.jar.sha1 | 1 + .../licenses/bcpkix-jdk18on-1.78.jar.sha1 | 1 - .../licenses/bcprov-jdk18on-1.78.jar.sha1 | 1 - .../licenses/bcutil-fips-2.0.3.jar.sha1 | 1 + 53 files changed, 519 insertions(+), 243 deletions(-) create mode 100644 client/rest/licenses/bc-fips-2.0.0.jar.sha1 create mode 100644 client/rest/licenses/bctls-fips-2.0.19.jar.sha1 create mode 100644 client/rest/licenses/bcutil-fips-2.0.3.jar.sha1 create mode 100644 client/rest/licenses/bouncycastle-LICENSE.txt create mode 100644 client/rest/licenses/bouncycastle-NOTICE.txt create mode 100644 distribution/src/config/java.security create mode 100644 distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.10.jar.sha1 delete mode 100644 distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 create mode 100644 libs/ssl-config/licenses/bc-fips-2.0.0.jar.sha1 create mode 100644 libs/ssl-config/licenses/bcpkix-fips-2.0.7.jar.sha1 delete mode 100644 libs/ssl-config/licenses/bcpkix-jdk18on-1.78.jar.sha1 delete mode 100644 libs/ssl-config/licenses/bcprov-jdk18on-1.78.jar.sha1 create mode 100644 libs/ssl-config/licenses/bctls-fips-2.0.19.jar.sha1 create mode 100644 libs/ssl-config/licenses/bcutil-fips-2.0.3.jar.sha1 delete mode 100644 libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 delete mode 100644 plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 delete mode 100644 plugins/identity-shiro/licenses/bcprov-jdk18on-LICENSE.txt create mode 100644 plugins/identity-shiro/licenses/password4j-1.8.2.jar.sha1 create mode 100644 plugins/identity-shiro/licenses/password4j-LICENSE.txt rename plugins/identity-shiro/licenses/{bcprov-jdk18on-NOTICE.txt => password4j-NOTICE.txt} (100%) create mode 100644 test/framework/licenses/bc-fips-2.0.0.jar.sha1 create mode 100644 test/framework/licenses/bcpkix-fips-2.0.7.jar.sha1 delete mode 100644 test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 delete mode 100644 test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 create mode 100644 test/framework/licenses/bcutil-fips-2.0.3.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 7368b7486d7b3..d4dbeafde903f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) +### Changed +- Migrate BC libs to their FIPS counterparts ([#3420](https://github.com/opensearch-project/OpenSearch/pull/14912)) + ### Dependencies - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607)) - Bump `com.google.api:api-common` from 1.8.1 to 2.46.1 ([#17604](https://github.com/opensearch-project/OpenSearch/pull/17604)) diff --git 
a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java index d0cb2da9c1dd3..d79dfb1124757 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java @@ -164,6 +164,12 @@ public void execute(Task t) { test.systemProperty("tests.seed", BuildParams.getTestSeed()); } + var securityFile = "java.security"; + test.systemProperty( + "java.security.properties", + project.getRootProject().getLayout().getProjectDirectory() + "/distribution/src/config/" + securityFile + ); + // don't track these as inputs since they contain absolute paths and break cache relocatability File gradleHome = project.getGradle().getGradleUserHomeDir(); String gradleVersion = project.getGradle().getGradleVersion(); diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 29d76e6910ee3..da91206e27eed 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -51,6 +51,9 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.slf4j:slf4j-api:${versions.slf4j}" + runtimeOnly "org.bouncycastle:bc-fips:${versions.bouncycastle_jce}" + runtimeOnly "org.bouncycastle:bctls-fips:${versions.bouncycastle_tls}" + runtimeOnly "org.bouncycastle:bcutil-fips:${versions.bouncycastle_util}" // reactor api "io.projectreactor:reactor-core:${versions.reactor}" @@ -70,6 +73,10 @@ dependencies { testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" } +tasks.named("dependencyLicenses").configure { + mapping from: /bc.*/, to: 'bouncycastle' +} + tasks.withType(CheckForbiddenApis).configureEach { //client does not depend on server, so only jdk and http signatures should be checked replaceSignatureFiles('jdk-signatures', 'http-signatures') diff --git a/client/rest/licenses/bc-fips-2.0.0.jar.sha1 b/client/rest/licenses/bc-fips-2.0.0.jar.sha1 new file mode 100644 index 0000000000000..79f0e3e9930bb --- /dev/null +++ b/client/rest/licenses/bc-fips-2.0.0.jar.sha1 @@ -0,0 +1 @@ +ee9ac432cf08f9a9ebee35d7cf8a45f94959a7ab \ No newline at end of file diff --git a/client/rest/licenses/bctls-fips-2.0.19.jar.sha1 b/client/rest/licenses/bctls-fips-2.0.19.jar.sha1 new file mode 100644 index 0000000000000..387635e9e1594 --- /dev/null +++ b/client/rest/licenses/bctls-fips-2.0.19.jar.sha1 @@ -0,0 +1 @@ +9cc33650ede63bc1a8281ed5c8e1da314d50bc76 \ No newline at end of file diff --git a/client/rest/licenses/bcutil-fips-2.0.3.jar.sha1 b/client/rest/licenses/bcutil-fips-2.0.3.jar.sha1 new file mode 100644 index 0000000000000..d553536576656 --- /dev/null +++ b/client/rest/licenses/bcutil-fips-2.0.3.jar.sha1 @@ -0,0 +1 @@ +a1857cd639295b10cc90e6d31ecbc523cdafcc19 \ No newline at end of file diff --git a/client/rest/licenses/bouncycastle-LICENSE.txt b/client/rest/licenses/bouncycastle-LICENSE.txt new file mode 100644 index 0000000000000..5c7c14696849d --- /dev/null +++ b/client/rest/licenses/bouncycastle-LICENSE.txt @@ -0,0 +1,14 @@ +Copyright (c) 2000 - 2023 The Legion of the Bouncy Castle Inc. 
(https://www.bouncycastle.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/client/rest/licenses/bouncycastle-NOTICE.txt b/client/rest/licenses/bouncycastle-NOTICE.txt new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/client/rest/licenses/bouncycastle-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java index 0b7cf6e8bb5fe..4f0ce6404e587 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java @@ -38,27 +38,23 @@ import com.sun.net.httpserver.HttpsServer; import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.ssl.SSLContextBuilder; import org.junit.AfterClass; import org.junit.BeforeClass; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.SSLException; import javax.net.ssl.TrustManagerFactory; import java.io.IOException; import java.io.InputStream; import java.net.InetAddress; import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Paths; import java.security.AccessController; -import java.security.KeyFactory; import java.security.KeyStore; import java.security.PrivilegedAction; -import java.security.cert.Certificate; -import java.security.cert.CertificateFactory; -import java.security.spec.PKCS8EncodedKeySpec; +import java.security.SecureRandom; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; @@ -75,7 +71,7 @@ public class RestClientBuilderIntegTests extends RestClientTestCase { @BeforeClass public static void startHttpServer() throws Exception { httpsServer = HttpsServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); - httpsServer.setHttpsConfigurator(new HttpsConfigurator(getSslContext())); + httpsServer.setHttpsConfigurator(new HttpsConfigurator(getSslContext(true))); httpsServer.createContext("/", new ResponseHandler()); httpsServer.start(); } @@ -103,11 +99,11 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { client.performRequest(new Request("GET", "/")); fail("connection should have been rejected due to SSL handshake"); } catch (Exception e) { - assertThat(e, instanceOf(SSLHandshakeException.class)); + assertThat(e.getCause(), instanceOf(SSLException.class)); } } - 
SSLContext.setDefault(getSslContext()); + SSLContext.setDefault(getSslContext(false)); try (RestClient client = buildRestClient()) { Response response = client.performRequest(new Request("GET", "/")); assertEquals(200, response.getStatusLine().getStatusCode()); @@ -122,34 +118,37 @@ private RestClient buildRestClient() { return RestClient.builder(new HttpHost("https", address.getHostString(), address.getPort())).build(); } - private static SSLContext getSslContext() throws Exception { - SSLContext sslContext = SSLContext.getInstance(getProtocol()); + private static SSLContext getSslContext(boolean server) throws Exception { + SSLContext sslContext; + char[] password = "password".toCharArray(); + SecureRandom secureRandom = SecureRandom.getInstance("DEFAULT", "BCFIPS"); + String fileExtension = ".jks"; + try ( - InputStream certFile = RestClientBuilderIntegTests.class.getResourceAsStream("/test.crt"); - InputStream keyStoreFile = RestClientBuilderIntegTests.class.getResourceAsStream("/test_truststore.jks") + InputStream trustStoreFile = RestClientBuilderIntegTests.class.getResourceAsStream("/test_truststore" + fileExtension); + InputStream keyStoreFile = RestClientBuilderIntegTests.class.getResourceAsStream("/testks" + fileExtension) ) { - // Build a keystore of default type programmatically since we can't use JKS keystores to - // init a KeyManagerFactory in FIPS 140 JVMs. - KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); - keyStore.load(null, "password".toCharArray()); - CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); - PKCS8EncodedKeySpec privateKeySpec = new PKCS8EncodedKeySpec( - Files.readAllBytes(Paths.get(RestClientBuilderIntegTests.class.getResource("/test.der").toURI())) - ); - KeyFactory keyFactory = KeyFactory.getInstance("RSA"); - keyStore.setKeyEntry( - "mykey", - keyFactory.generatePrivate(privateKeySpec), - "password".toCharArray(), - new Certificate[] { certFactory.generateCertificate(certFile) } - ); - KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(keyStore, "password".toCharArray()); + KeyStore keyStore = KeyStore.getInstance("JKS"); + keyStore.load(keyStoreFile, password); + KeyManagerFactory kmf = KeyManagerFactory.getInstance("PKIX", "BCJSSE"); + kmf.init(keyStore, password); + KeyStore trustStore = KeyStore.getInstance("JKS"); - trustStore.load(keyStoreFile, "password".toCharArray()); - TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + trustStore.load(trustStoreFile, password); + TrustManagerFactory tmf = TrustManagerFactory.getInstance("PKIX", "BCJSSE"); tmf.init(trustStore); - sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); + + SSLContextBuilder sslContextBuilder = SSLContextBuilder.create() + .setProvider("BCJSSE") + .setProtocol(getProtocol()) + .setSecureRandom(secureRandom); + + if (server) { + sslContextBuilder.loadKeyMaterial(keyStore, password); + } + sslContextBuilder.loadTrustMaterial(trustStore, null); + sslContext = sslContextBuilder.build(); + } return sslContext; } diff --git a/distribution/src/config/java.security b/distribution/src/config/java.security new file mode 100644 index 0000000000000..d3682533af407 --- /dev/null +++ b/distribution/src/config/java.security @@ -0,0 +1,10 @@ +# Security properties for non-approved mode 'org.bouncycastle.fips.approved_only=false'. +# Intended to be used complementary with a single equal sign e.g. 
'java.security.properties=java.security' + +security.provider.1=org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider +security.provider.2=org.bouncycastle.jsse.provider.BouncyCastleJsseProvider +security.provider.3=SUN +security.provider.4=SunJGSS + +ssl.KeyManagerFactory.algorithm=PKIX +ssl.TrustManagerFactory.algorithm=PKIX diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddFileKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddFileKeyStoreCommandTests.java index 3d188590d5c47..db6bb2d5473f4 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddFileKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddFileKeyStoreCommandTests.java @@ -211,17 +211,13 @@ public void testIncorrectPassword() throws Exception { terminal.addSecretInput("thewrongkeystorepassword"); UserException e = expectThrows(UserException.class, () -> execute("foo", file.toString())); assertEquals(e.getMessage(), ExitCodes.DATA_ERROR, e.exitCode); - if (inFipsJvm()) { - assertThat( - e.getMessage(), - anyOf( - containsString("Provided keystore password was incorrect"), - containsString("Keystore has been corrupted or tampered with") - ) - ); - } else { - assertThat(e.getMessage(), containsString("Provided keystore password was incorrect")); - } + assertThat( + e.getMessage(), + anyOf( + containsString("Provided keystore password was incorrect"), + containsString("Keystore has been corrupted or tampered with") + ) + ); } public void testAddToUnprotectedKeystore() throws Exception { diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddStringKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddStringKeyStoreCommandTests.java index 22012d1f44986..41ab7c45690dc 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddStringKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddStringKeyStoreCommandTests.java @@ -72,18 +72,13 @@ public void testInvalidPassphrease() throws Exception { terminal.addSecretInput("thewrongpassword"); UserException e = expectThrows(UserException.class, () -> execute("foo2")); assertEquals(e.getMessage(), ExitCodes.DATA_ERROR, e.exitCode); - if (inFipsJvm()) { - assertThat( - e.getMessage(), - anyOf( - containsString("Provided keystore password was incorrect"), - containsString("Keystore has been corrupted or tampered with") - ) - ); - } else { - assertThat(e.getMessage(), containsString("Provided keystore password was incorrect")); - } - + assertThat( + e.getMessage(), + anyOf( + containsString("Provided keystore password was incorrect"), + containsString("Keystore has been corrupted or tampered with") + ) + ); } public void testMissingPromptCreateWithoutPasswordWhenPrompted() throws Exception { diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ChangeKeyStorePasswordCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ChangeKeyStorePasswordCommandTests.java index 1ce57332a9a31..1aa62cf71ed65 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ChangeKeyStorePasswordCommandTests.java +++ 
b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ChangeKeyStorePasswordCommandTests.java @@ -104,16 +104,12 @@ public void testChangeKeyStorePasswordWrongExistingPassword() throws Exception { // We'll only be prompted once (for the old password) UserException e = expectThrows(UserException.class, this::execute); assertEquals(e.getMessage(), ExitCodes.DATA_ERROR, e.exitCode); - if (inFipsJvm()) { - assertThat( - e.getMessage(), - anyOf( - containsString("Provided keystore password was incorrect"), - containsString("Keystore has been corrupted or tampered with") - ) - ); - } else { - assertThat(e.getMessage(), containsString("Provided keystore password was incorrect")); - } + assertThat( + e.getMessage(), + anyOf( + containsString("Provided keystore password was incorrect"), + containsString("Keystore has been corrupted or tampered with") + ) + ); } } diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/KeyStoreWrapperTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/KeyStoreWrapperTests.java index efb833e8fd94a..e6cb77336c6e7 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/KeyStoreWrapperTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/KeyStoreWrapperTests.java @@ -68,8 +68,13 @@ import java.nio.file.Path; import java.security.GeneralSecurityException; import java.security.KeyStore; +import java.security.KeyStoreException; import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.security.NoSuchProviderException; import java.security.SecureRandom; +import java.security.cert.CertificateException; +import java.security.spec.InvalidKeySpecException; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; @@ -133,17 +138,13 @@ public void testDecryptKeyStoreWithWrongPassword() throws Exception { SecurityException.class, () -> loadedKeystore.decrypt(new char[] { 'i', 'n', 'v', 'a', 'l', 'i', 'd' }) ); - if (inFipsJvm()) { - assertThat( - exception.getMessage(), - anyOf( - containsString("Provided keystore password was incorrect"), - containsString("Keystore has been corrupted or tampered with") - ) - ); - } else { - assertThat(exception.getMessage(), containsString("Provided keystore password was incorrect")); - } + assertThat( + exception.getMessage(), + anyOf( + containsString("Provided keystore password was incorrect"), + containsString("Keystore has been corrupted or tampered with") + ) + ); } public void testCannotReadStringFromClosedKeystore() throws Exception { @@ -366,29 +367,8 @@ public void testIllegalSettingName() throws Exception { public void testBackcompatV1() throws Exception { assumeFalse("Can't run in a FIPS JVM as PBE is not available", inFipsJvm()); + generateV1(); Path configDir = env.configDir(); - NIOFSDirectory directory = new NIOFSDirectory(configDir); - try (IndexOutput output = EndiannessReverserUtil.createOutput(directory, "opensearch.keystore", IOContext.DEFAULT)) { - CodecUtil.writeHeader(output, "opensearch.keystore", 1); - output.writeByte((byte) 0); // hasPassword = false - output.writeString("PKCS12"); - output.writeString("PBE"); - - SecretKeyFactory secretFactory = SecretKeyFactory.getInstance("PBE"); - KeyStore keystore = KeyStore.getInstance("PKCS12"); - keystore.load(null, null); - SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec("stringSecretValue".toCharArray())); - 
KeyStore.ProtectionParameter protectionParameter = new KeyStore.PasswordProtection(new char[0]); - keystore.setEntry("string_setting", new KeyStore.SecretKeyEntry(secretKey), protectionParameter); - - ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream(); - keystore.store(keystoreBytesStream, new char[0]); - byte[] keystoreBytes = keystoreBytesStream.toByteArray(); - output.writeInt(keystoreBytes.length); - output.writeBytes(keystoreBytes, keystoreBytes.length); - CodecUtil.writeFooter(output); - } - KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir); keystore.decrypt(new char[0]); SecureString testValue = keystore.getString("string_setting"); @@ -397,47 +377,8 @@ public void testBackcompatV1() throws Exception { public void testBackcompatV2() throws Exception { assumeFalse("Can't run in a FIPS JVM as PBE is not available", inFipsJvm()); + byte[] fileBytes = generateV2(); Path configDir = env.configDir(); - NIOFSDirectory directory = new NIOFSDirectory(configDir); - byte[] fileBytes = new byte[20]; - random().nextBytes(fileBytes); - try (IndexOutput output = EndiannessReverserUtil.createOutput(directory, "opensearch.keystore", IOContext.DEFAULT)) { - - CodecUtil.writeHeader(output, "opensearch.keystore", 2); - output.writeByte((byte) 0); // hasPassword = false - output.writeString("PKCS12"); - output.writeString("PBE"); // string algo - output.writeString("PBE"); // file algo - - output.writeVInt(2); // num settings - output.writeString("string_setting"); - output.writeString("STRING"); - output.writeString("file_setting"); - output.writeString("FILE"); - - SecretKeyFactory secretFactory = SecretKeyFactory.getInstance("PBE"); - KeyStore keystore = KeyStore.getInstance("PKCS12"); - keystore.load(null, null); - SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec("stringSecretValue".toCharArray())); - KeyStore.ProtectionParameter protectionParameter = new KeyStore.PasswordProtection(new char[0]); - keystore.setEntry("string_setting", new KeyStore.SecretKeyEntry(secretKey), protectionParameter); - - byte[] base64Bytes = Base64.getEncoder().encode(fileBytes); - char[] chars = new char[base64Bytes.length]; - for (int i = 0; i < chars.length; ++i) { - chars[i] = (char) base64Bytes[i]; // PBE only stores the lower 8 bits, so this narrowing is ok - } - secretKey = secretFactory.generateSecret(new PBEKeySpec(chars)); - keystore.setEntry("file_setting", new KeyStore.SecretKeyEntry(secretKey), protectionParameter); - - ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream(); - keystore.store(keystoreBytesStream, new char[0]); - byte[] keystoreBytes = keystoreBytesStream.toByteArray(); - output.writeInt(keystoreBytes.length); - output.writeBytes(keystoreBytes, keystoreBytes.length); - CodecUtil.writeFooter(output); - } - KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir); keystore.decrypt(new char[0]); SecureString testValue = keystore.getString("string_setting"); @@ -497,6 +438,77 @@ public void testLegacyV3() throws GeneralSecurityException, IOException { assertThat(toByteArray(wrapper.getFile("file_setting")), equalTo("file_value".getBytes(StandardCharsets.UTF_8))); } + private void generateV1() throws IOException, NoSuchAlgorithmException, NoSuchProviderException, CertificateException, + InvalidKeySpecException, KeyStoreException { + Path configDir = env.configDir(); + NIOFSDirectory directory = new NIOFSDirectory(configDir); + try (IndexOutput output = EndiannessReverserUtil.createOutput(directory, "opensearch.keystore", 
IOContext.DEFAULT)) { + CodecUtil.writeHeader(output, "opensearch.keystore", 1); + output.writeByte((byte) 0); // hasPassword = false + output.writeString("PKCS12"); + output.writeString("PBE"); + + SecretKeyFactory secretFactory = SecretKeyFactory.getInstance("PBE", "SunJCE"); + KeyStore keystore = KeyStore.getInstance("PKCS12", "SUN"); + keystore.load(null, null); + SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec("stringSecretValue".toCharArray())); + KeyStore.ProtectionParameter protectionParameter = new KeyStore.PasswordProtection(new char[0]); + keystore.setEntry("string_setting", new KeyStore.SecretKeyEntry(secretKey), protectionParameter); + + ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream(); + keystore.store(keystoreBytesStream, new char[0]); + byte[] keystoreBytes = keystoreBytesStream.toByteArray(); + output.writeInt(keystoreBytes.length); + output.writeBytes(keystoreBytes, keystoreBytes.length); + CodecUtil.writeFooter(output); + } + } + + private byte[] generateV2() throws Exception { + Path configDir = env.configDir(); + NIOFSDirectory directory = new NIOFSDirectory(configDir); + byte[] fileBytes = new byte[20]; + random().nextBytes(fileBytes); + try (IndexOutput output = EndiannessReverserUtil.createOutput(directory, "opensearch.keystore", IOContext.DEFAULT)) { + + CodecUtil.writeHeader(output, "opensearch.keystore", 2); + output.writeByte((byte) 0); // hasPassword = false + output.writeString("PKCS12"); + output.writeString("PBE"); // string algo + output.writeString("PBE"); // file algo + + output.writeVInt(2); // num settings + output.writeString("string_setting"); + output.writeString("STRING"); + output.writeString("file_setting"); + output.writeString("FILE"); + + SecretKeyFactory secretFactory = SecretKeyFactory.getInstance("PBE", "SunJCE"); + KeyStore keystore = KeyStore.getInstance("PKCS12", "SUN"); + keystore.load(null, null); + SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec("stringSecretValue".toCharArray())); + KeyStore.ProtectionParameter protectionParameter = new KeyStore.PasswordProtection(new char[0]); + keystore.setEntry("string_setting", new KeyStore.SecretKeyEntry(secretKey), protectionParameter); + + byte[] base64Bytes = Base64.getEncoder().encode(fileBytes); + char[] chars = new char[base64Bytes.length]; + for (int i = 0; i < chars.length; ++i) { + chars[i] = (char) base64Bytes[i]; // PBE only stores the lower 8 bits, so this narrowing is ok + } + secretKey = secretFactory.generateSecret(new PBEKeySpec(chars)); + keystore.setEntry("file_setting", new KeyStore.SecretKeyEntry(secretKey), protectionParameter); + + ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream(); + keystore.store(keystoreBytesStream, new char[0]); + byte[] keystoreBytes = keystoreBytesStream.toByteArray(); + output.writeInt(keystoreBytes.length); + output.writeBytes(keystoreBytes, keystoreBytes.length); + CodecUtil.writeFooter(output); + } + + return fileBytes; + } + private byte[] toByteArray(final InputStream is) throws IOException { final ByteArrayOutputStream os = new ByteArrayOutputStream(); final byte[] buffer = new byte[1024]; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ListKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ListKeyStoreCommandTests.java index 0846e28fb42af..36bef3a82281d 100644 --- 
a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ListKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ListKeyStoreCommandTests.java @@ -90,17 +90,13 @@ public void testListWithIncorrectPassword() throws Exception { terminal.addSecretInput("thewrongkeystorepassword"); UserException e = expectThrows(UserException.class, this::execute); assertEquals(e.getMessage(), ExitCodes.DATA_ERROR, e.exitCode); - if (inFipsJvm()) { - assertThat( - e.getMessage(), - anyOf( - containsString("Provided keystore password was incorrect"), - containsString("Keystore has been corrupted or tampered with") - ) - ); - } else { - assertThat(e.getMessage(), containsString("Provided keystore password was incorrect")); - } + assertThat( + e.getMessage(), + anyOf( + containsString("Provided keystore password was incorrect"), + containsString("Keystore has been corrupted or tampered with") + ) + ); } public void testListWithUnprotectedKeystore() throws Exception { diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/RemoveSettingKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/RemoveSettingKeyStoreCommandTests.java index 66d448400d4e3..276af6cfa659f 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/RemoveSettingKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/RemoveSettingKeyStoreCommandTests.java @@ -107,18 +107,13 @@ public void testRemoveWithIncorrectPassword() throws Exception { terminal.addSecretInput("thewrongpassword"); UserException e = expectThrows(UserException.class, () -> execute("foo")); assertEquals(e.getMessage(), ExitCodes.DATA_ERROR, e.exitCode); - if (inFipsJvm()) { - assertThat( - e.getMessage(), - anyOf( - containsString("Provided keystore password was incorrect"), - containsString("Keystore has been corrupted or tampered with") - ) - ); - } else { - assertThat(e.getMessage(), containsString("Provided keystore password was incorrect")); - } - + assertThat( + e.getMessage(), + anyOf( + containsString("Provided keystore password was incorrect"), + containsString("Keystore has been corrupted or tampered with") + ) + ); } public void testRemoveFromUnprotectedKeystore() throws Exception { diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index ecb86ecb1eb0b..1f11c53742c9b 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -37,11 +37,9 @@ base { dependencies { compileOnly project(":server") compileOnly project(":libs:opensearch-cli") - api "org.bouncycastle:bcpg-fips:2.0.9" - api "org.bouncycastle:bc-fips:2.0.0" - testImplementation(project(":test:framework")) { - exclude group: 'org.bouncycastle' - } + api "org.bouncycastle:bc-fips:${versions.bouncycastle_jce}" + api "org.bouncycastle:bcpg-fips:${versions.bouncycastle_pg}" + testImplementation project(":test:framework") testImplementation 'com.google.jimfs:jimfs:1.3.0' testRuntimeOnly("com.google.guava:guava:${versions.guava}") { transitive = false diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.10.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.10.jar.sha1 new file mode 100644 index 0000000000000..c7aa41c7996d8 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.10.jar.sha1 @@ -0,0 +1 @@ 
+f21aff3416359ad20b2712c0727696858a2e769a \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 deleted file mode 100644 index 20cdbf6dc8aa8..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f69719ef8dbf34d5f906ce480496446b2fd2ae27 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java index 1ab2697d5ced8..94864828f61cc 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java @@ -40,7 +40,6 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.Constants; import org.bouncycastle.bcpg.ArmoredInputStream; -import org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider; import org.bouncycastle.openpgp.PGPException; import org.bouncycastle.openpgp.PGPPublicKey; import org.bouncycastle.openpgp.PGPPublicKeyRingCollection; @@ -633,7 +632,7 @@ void verifySignature(final Path zip, final String urlString) throws IOException, // compute the signature of the downloaded plugin zip final PGPPublicKeyRingCollection collection = new PGPPublicKeyRingCollection(ain, new JcaKeyFingerprintCalculator()); final PGPPublicKey key = collection.getPublicKey(signature.getKeyID()); - signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider(new BouncyCastleFipsProvider()), key); + signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider("BCFIPS"), key); final byte[] buffer = new byte[1024]; int read; while ((read = fin.read(buffer)) != -1) { diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java index 56ef09c5c9128..2a627dffb7c3a 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java @@ -40,7 +40,6 @@ import org.bouncycastle.bcpg.ArmoredOutputStream; import org.bouncycastle.bcpg.BCPGOutputStream; import org.bouncycastle.bcpg.HashAlgorithmTags; -import org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider; import org.bouncycastle.openpgp.PGPEncryptedData; import org.bouncycastle.openpgp.PGPException; import org.bouncycastle.openpgp.PGPKeyPair; @@ -1362,8 +1361,7 @@ public PGPSecretKey newSecretKey() throws NoSuchAlgorithmException, NoSuchProvid null, null, new JcaPGPContentSignerBuilder(pkp.getPublicKey().getAlgorithm(), HashAlgorithmTags.SHA256), - new JcePBESecretKeyEncryptorBuilder(PGPEncryptedData.AES_192, sha1Calc).setProvider(new BouncyCastleFipsProvider()) - .build("passphrase".toCharArray()) + new JcePBESecretKeyEncryptorBuilder(PGPEncryptedData.AES_192, sha1Calc).setProvider("BCFIPS").build("passphrase".toCharArray()) ); } diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index b423523bb9e3f..4ccb794137c14 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -60,7 +60,12 @@ reactivestreams = "1.0.4" # when updating this version, you need to 
ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli -bouncycastle="1.78" +bouncycastle_jce = "2.0.0" +bouncycastle_tls = "2.0.19" +bouncycastle_pkix = "2.0.7" +bouncycastle_pg = "2.0.10" +bouncycastle_util = "2.0.3" +password4j = "1.8.2" # test dependencies randomizedrunner = "2.7.1" junit = "4.13.2" diff --git a/libs/ssl-config/build.gradle b/libs/ssl-config/build.gradle index da0829cb533da..478baf4c6a9c5 100644 --- a/libs/ssl-config/build.gradle +++ b/libs/ssl-config/build.gradle @@ -34,9 +34,10 @@ apply plugin: "opensearch.publish" dependencies { api project(':libs:opensearch-common') - api "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" - api "org.bouncycastle:bcpkix-jdk18on:${versions.bouncycastle}" - runtimeOnly "org.bouncycastle:bcutil-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bc-fips:${versions.bouncycastle_jce}" + api "org.bouncycastle:bcpkix-fips:${versions.bouncycastle_pkix}" + runtimeOnly "org.bouncycastle:bctls-fips:${versions.bouncycastle_tls}" + runtimeOnly "org.bouncycastle:bcutil-fips:${versions.bouncycastle_util}" testImplementation(project(":test:framework")) { exclude group: 'org.opensearch', module: 'opensearch-ssl-config' diff --git a/libs/ssl-config/licenses/bc-fips-2.0.0.jar.sha1 b/libs/ssl-config/licenses/bc-fips-2.0.0.jar.sha1 new file mode 100644 index 0000000000000..79f0e3e9930bb --- /dev/null +++ b/libs/ssl-config/licenses/bc-fips-2.0.0.jar.sha1 @@ -0,0 +1 @@ +ee9ac432cf08f9a9ebee35d7cf8a45f94959a7ab \ No newline at end of file diff --git a/libs/ssl-config/licenses/bcpkix-fips-2.0.7.jar.sha1 b/libs/ssl-config/licenses/bcpkix-fips-2.0.7.jar.sha1 new file mode 100644 index 0000000000000..5df930b54fe44 --- /dev/null +++ b/libs/ssl-config/licenses/bcpkix-fips-2.0.7.jar.sha1 @@ -0,0 +1 @@ +01eea0f325315ca6295b0a6926ff862d8001cdf9 \ No newline at end of file diff --git a/libs/ssl-config/licenses/bcpkix-jdk18on-1.78.jar.sha1 b/libs/ssl-config/licenses/bcpkix-jdk18on-1.78.jar.sha1 deleted file mode 100644 index 385a9d930eede..0000000000000 --- a/libs/ssl-config/licenses/bcpkix-jdk18on-1.78.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd61bcdb87678451dd42d42e267979bd4b4451a1 \ No newline at end of file diff --git a/libs/ssl-config/licenses/bcprov-jdk18on-1.78.jar.sha1 b/libs/ssl-config/licenses/bcprov-jdk18on-1.78.jar.sha1 deleted file mode 100644 index 47fb5fd5e5f5d..0000000000000 --- a/libs/ssl-config/licenses/bcprov-jdk18on-1.78.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -619aafb92dc0b4c6cc4cf86c487ca48ee2d67a8e \ No newline at end of file diff --git a/libs/ssl-config/licenses/bctls-fips-2.0.19.jar.sha1 b/libs/ssl-config/licenses/bctls-fips-2.0.19.jar.sha1 new file mode 100644 index 0000000000000..387635e9e1594 --- /dev/null +++ b/libs/ssl-config/licenses/bctls-fips-2.0.19.jar.sha1 @@ -0,0 +1 @@ +9cc33650ede63bc1a8281ed5c8e1da314d50bc76 \ No newline at end of file diff --git a/libs/ssl-config/licenses/bcutil-fips-2.0.3.jar.sha1 b/libs/ssl-config/licenses/bcutil-fips-2.0.3.jar.sha1 new file mode 100644 index 0000000000000..d553536576656 --- /dev/null +++ b/libs/ssl-config/licenses/bcutil-fips-2.0.3.jar.sha1 @@ -0,0 +1 @@ +a1857cd639295b10cc90e6d31ecbc523cdafcc19 \ No newline at end of file diff --git a/libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 b/libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 deleted file mode 100644 index 9c88eef3ace17..0000000000000 --- a/libs/ssl-config/licenses/bcutil-jdk18on-1.78.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-81c1f5e06f206be5dad137d563609dbe66c81d31 \ No newline at end of file diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java index 441e17b808feb..b9faa9a72ee51 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java @@ -34,7 +34,6 @@ import org.bouncycastle.asn1.ASN1ObjectIdentifier; import org.bouncycastle.asn1.pkcs.PrivateKeyInfo; -import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.openssl.PEMEncryptedKeyPair; import org.bouncycastle.openssl.PEMKeyPair; import org.bouncycastle.openssl.PEMParser; @@ -50,7 +49,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.security.PrivateKey; -import java.security.Provider; import java.security.cert.Certificate; import java.security.cert.CertificateException; import java.security.cert.CertificateFactory; @@ -62,7 +60,7 @@ final class PemUtils { - private static final Provider BC = new BouncyCastleProvider(); + public static final String BCFIPS = "BCFIPS"; PemUtils() { throw new IllegalStateException("Utility class should not be instantiated"); @@ -116,11 +114,12 @@ private static PrivateKeyInfo loadPrivateKeyFromFile(Path keyPath, Supplier EMPTY_SYSTEM_PROPERTIES = (key, defaultValue) -> defaultValue; public void testGetSystemTrustStoreWithNoSystemProperties() throws Exception { - final DefaultJdkTrustConfig trustConfig = new DefaultJdkTrustConfig((key, defaultValue) -> defaultValue); + final DefaultJdkTrustConfig trustConfig = new DefaultJdkTrustConfig(EMPTY_SYSTEM_PROPERTIES); assertThat(trustConfig.getDependentFiles(), emptyIterable()); final X509ExtendedTrustManager trustManager = trustConfig.createTrustManager(); assertStandardIssuers(trustManager); diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java index 4175b0ee424b7..d420c4634165a 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java @@ -42,7 +42,6 @@ import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.nio.file.StandardOpenOption; -import java.security.GeneralSecurityException; import java.security.Principal; import java.security.cert.X509Certificate; import java.util.Arrays; @@ -74,7 +73,7 @@ public void testBadFileFormatFails() throws Exception { Files.write(ca, generateRandomByteArrayOfLength(128), StandardOpenOption.APPEND); final PemTrustConfig trustConfig = new PemTrustConfig(Collections.singletonList(ca)); assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ca)); - assertInvalidFileFormat(trustConfig, ca); + assertFailedToParse(trustConfig, ca); } public void testEmptyFileFails() throws Exception { @@ -121,7 +120,7 @@ public void testTrustConfigReloadsFileContents() throws Exception { assertFileNotFound(trustConfig, ca1); Files.write(ca1, generateRandomByteArrayOfLength(128), StandardOpenOption.CREATE); - assertInvalidFileFormat(trustConfig, ca1); + assertFailedToParse(trustConfig, ca1); } private void assertCertificateChain(PemTrustConfig trustConfig, String... 
caNames) { @@ -142,14 +141,6 @@ private void assertFailedToParse(PemTrustConfig trustConfig, Path file) { assertThat(exception.getMessage(), Matchers.containsString("Failed to parse any certificate from")); } - private void assertInvalidFileFormat(PemTrustConfig trustConfig, Path file) { - final SslConfigException exception = expectThrows(SslConfigException.class, trustConfig::createTrustManager); - assertThat(exception.getMessage(), Matchers.containsString(file.toAbsolutePath().toString())); - assertThat(exception.getMessage(), Matchers.containsString("cannot create trust")); - assertThat(exception.getMessage(), Matchers.containsString("PEM")); - assertThat(exception.getCause(), Matchers.instanceOf(GeneralSecurityException.class)); - } - private void assertFileNotFound(PemTrustConfig trustConfig, Path file) { final SslConfigException exception = expectThrows(SslConfigException.class, trustConfig::createTrustManager); assertThat(exception.getMessage(), Matchers.containsString("files do not exist")); diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java index e19fa91f7773e..31a4082f0609a 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java @@ -52,6 +52,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -72,8 +73,13 @@ public class SslDiagnosticsTests extends OpenSearchTestCase { public void testTrustEmptyStore() { var fileName = "cert-all/empty.jks"; - var exception = assertThrows(CertificateException.class, () -> loadCertificate(fileName)); - assertThat(exception.getMessage(), Matchers.equalTo("No certificate data found")); + var exception = assertThrows(SslConfigException.class, () -> loadCertificate(fileName)); + assertThat( + exception.getMessage(), + Matchers.equalTo( + String.format(Locale.ROOT, "Failed to parse any certificate from [%s]", getDataPath("/certs/" + fileName).toAbsolutePath()) + ) + ); } public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsTrusted() throws Exception { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java index d0b0403874c7a..170f89838dd0d 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java @@ -56,7 +56,6 @@ import javax.net.ssl.KeyManager; import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLHandshakeException; import javax.net.ssl.SSLPeerUnverifiedException; import javax.net.ssl.TrustManager; import javax.net.ssl.X509ExtendedKeyManager; @@ -65,6 +64,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.nio.file.Path; +import java.security.cert.CertPathBuilderException; import java.security.cert.Certificate; import java.security.cert.X509Certificate; import java.util.ArrayList; @@ -138,7 +138,13 @@ public void testClientFailsWithUntrustedCertificate() throws IOException { final Environment environment = TestEnvironment.newEnvironment(settings); final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, 
mock(ResourceWatcherService.class)); try (RestClient client = Reindexer.buildRestClient(getRemoteInfo(), ssl, 1L, threads)) { - expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/"))); + var exception = expectThrows(Exception.class, () -> client.performRequest(new Request("GET", "/"))); + var rootCause = exception.getCause().getCause().getCause().getCause(); + assertThat(rootCause, Matchers.instanceOf(CertPathBuilderException.class)); + assertThat( + rootCause.getMessage(), + Matchers.containsString("No issuer certificate for certificate in certification path found") + ); } } @@ -206,7 +212,7 @@ public void testClientPassesClientCertificate() throws IOException { assertThat(certs, Matchers.arrayWithSize(1)); assertThat(certs[0], Matchers.instanceOf(X509Certificate.class)); final X509Certificate clientCert = (X509Certificate) certs[0]; - assertThat(clientCert.getSubjectDN().getName(), Matchers.is("CN=localhost, OU=UNIT, O=ORG, L=TORONTO, ST=ONTARIO, C=CA")); + assertThat(clientCert.getSubjectDN().getName(), Matchers.is("CN=localhost,OU=UNIT,O=ORG,L=TORONTO,ST=ONTARIO,C=CA")); assertThat(clientCert.getIssuerDN().getName(), Matchers.is("CN=OpenSearch Test Node")); } } diff --git a/plugins/identity-shiro/build.gradle b/plugins/identity-shiro/build.gradle index 436a9b3e48128..b4952c8484a95 100644 --- a/plugins/identity-shiro/build.gradle +++ b/plugins/identity-shiro/build.gradle @@ -28,7 +28,7 @@ dependencies { implementation 'org.passay:passay:1.6.3' - implementation "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" + api "com.password4j:password4j:${versions.password4j}" testImplementation project(path: ':modules:transport-netty4') // for http testImplementation "org.mockito:mockito-core:${versions.mockito}" diff --git a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 deleted file mode 100644 index 47fb5fd5e5f5d..0000000000000 --- a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -619aafb92dc0b4c6cc4cf86c487ca48ee2d67a8e \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk18on-LICENSE.txt b/plugins/identity-shiro/licenses/bcprov-jdk18on-LICENSE.txt deleted file mode 100644 index 9f27bafe96885..0000000000000 --- a/plugins/identity-shiro/licenses/bcprov-jdk18on-LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2000 - 2013 The Legion of the Bouncy Castle Inc. - (http://www.bouncycastle.org) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/plugins/identity-shiro/licenses/password4j-1.8.2.jar.sha1 b/plugins/identity-shiro/licenses/password4j-1.8.2.jar.sha1 new file mode 100644 index 0000000000000..bee14467d32a2 --- /dev/null +++ b/plugins/identity-shiro/licenses/password4j-1.8.2.jar.sha1 @@ -0,0 +1 @@ +f8ac106c667c0b081075e81a90dc92861b9bb66e \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/password4j-LICENSE.txt b/plugins/identity-shiro/licenses/password4j-LICENSE.txt new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/plugins/identity-shiro/licenses/password4j-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/identity-shiro/licenses/bcprov-jdk18on-NOTICE.txt b/plugins/identity-shiro/licenses/password4j-NOTICE.txt similarity index 100% rename from plugins/identity-shiro/licenses/bcprov-jdk18on-NOTICE.txt rename to plugins/identity-shiro/licenses/password4j-NOTICE.txt diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcher.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcher.java index f8113101deb70..55e3e3414ac2c 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcher.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcher.java @@ -12,7 +12,16 @@ import org.apache.shiro.authc.AuthenticationToken; import org.apache.shiro.authc.UsernamePasswordToken; import org.apache.shiro.authc.credential.CredentialsMatcher; -import org.bouncycastle.crypto.generators.OpenBSDBCrypt; +import org.opensearch.SpecialPermission; + +import java.nio.CharBuffer; +import java.security.AccessController; +import java.security.PrivilegedAction; + +import com.password4j.BcryptFunction; +import com.password4j.Password; + +import static org.opensearch.core.common.Strings.isNullOrEmpty; /** * Password matcher for BCrypt @@ -28,7 +37,25 @@ public class BCryptPasswordMatcher implements CredentialsMatcher { @Override public boolean doCredentialsMatch(AuthenticationToken token, AuthenticationInfo info) { final UsernamePasswordToken userToken = (UsernamePasswordToken) token; - return OpenBSDBCrypt.checkPassword((String) info.getCredentials(), userToken.getPassword()); + return check(userToken.getPassword(), (String) info.getCredentials()); + } + + @SuppressWarnings("removal") + private boolean check(char[] password, String hash) { + if (password == null || password.length == 0) { + throw new IllegalStateException("Password cannot be empty or null"); + } + if (isNullOrEmpty(hash)) { + throw new IllegalStateException("Hash cannot be empty or null"); + } + CharBuffer passwordBuffer = CharBuffer.wrap(password); + SecurityManager securityManager = System.getSecurityManager(); + if (securityManager != null) { + securityManager.checkPermission(new SpecialPermission()); + } + return AccessController.doPrivileged( + (PrivilegedAction) () -> Password.check(passwordBuffer, 
hash).with(BcryptFunction.getInstanceFromHash(hash)) + ); } } diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcherTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcherTests.java index 91e88ed1bf701..451f0eae06234 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcherTests.java +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcherTests.java @@ -41,4 +41,33 @@ public void testCredentialDoNotMatch() { assertThat(result, equalTo(false)); } + + public void testEmptyPassword() { + { + final UsernamePasswordToken token = mock(UsernamePasswordToken.class); + when(token.getPassword()).thenReturn(null); + final AuthenticationInfo info = mock(AuthenticationInfo.class); + + Exception e = assertThrows(IllegalStateException.class, () -> new BCryptPasswordMatcher().doCredentialsMatch(token, info)); + assertThat(e.getMessage(), equalTo("Password cannot be empty or null")); + } + { + final UsernamePasswordToken token = mock(UsernamePasswordToken.class); + when(token.getPassword()).thenReturn("".toCharArray()); + final AuthenticationInfo info = mock(AuthenticationInfo.class); + + Exception e = assertThrows(IllegalStateException.class, () -> new BCryptPasswordMatcher().doCredentialsMatch(token, info)); + assertThat(e.getMessage(), equalTo("Password cannot be empty or null")); + } + } + + public void testEmptyHash() { + final UsernamePasswordToken token = mock(UsernamePasswordToken.class); + when(token.getPassword()).thenReturn("HashedPassword".toCharArray()); + final AuthenticationInfo info = mock(AuthenticationInfo.class); + when(info.getCredentials()).thenReturn(randomFrom("", null)); + + Exception e = assertThrows(IllegalStateException.class, () -> new BCryptPasswordMatcher().doCredentialsMatch(token, info)); + assertThat(e.getMessage(), equalTo("Hash cannot be empty or null")); + } } diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index 6abba5577d605..89f9d071e7e83 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -40,10 +40,6 @@ int pluginsCount = 0 testClusters.integTest { project(':plugins').getChildProjects().each { pluginName, pluginProject -> - if (BuildParams.inFipsJvm && pluginName == "ingest-attachment"){ - //Do not attempt to install ingest-attachment in FIPS 140 as it is not supported (it depends on non-FIPS BouncyCastle - return - } plugin pluginProject.path pluginsCount += 1 } diff --git a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java index ed58e6b21e165..81fb1309df310 100644 --- a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java @@ -449,7 +449,7 @@ private byte[] encrypt(char[] password, byte[] salt, byte[] iv) throws GeneralSe private void decryptLegacyEntries() throws GeneralSecurityException, IOException { // v1 and v2 keystores never had passwords actually used, so we always use an empty password - KeyStore keystore = KeyStore.getInstance("PKCS12"); + KeyStore keystore = KeyStore.getInstance("PKCS12", "SUN"); Map settingTypes = new HashMap<>(); ByteArrayInputStream inputBytes = new ByteArrayInputStream(dataBytes); try (DataInputStream input = new DataInputStream(inputBytes)) { @@ -488,7 +488,7 @@ 
private void decryptLegacyEntries() throws GeneralSecurityException, IOException // fill in the entries now that we know all the types to expect this.entries.set(new HashMap<>()); - SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("PBE"); + SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("PBE", "SunJCE"); KeyStore.PasswordProtection password = new KeyStore.PasswordProtection("".toCharArray()); for (Map.Entry settingEntry : settingTypes.entrySet()) { diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index a6d6014b26bfb..f521ce0011540 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -195,4 +195,11 @@ grant { permission java.io.FilePermission "/sys/fs/cgroup/cpuacct/-", "read"; permission java.io.FilePermission "/sys/fs/cgroup/memory", "read"; permission java.io.FilePermission "/sys/fs/cgroup/memory/-", "read"; + + // needed by RestClientBuilder + permission java.io.FilePermission "${java.home}/lib/security/cacerts", "read"; + permission java.io.FilePermission "${java.home}/lib/security/jssecacerts", "read"; + permission java.security.SecurityPermission "getProperty.jdk.certpath.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.tls.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.keystore.type.compat"; }; diff --git a/server/src/main/resources/org/opensearch/bootstrap/test.policy b/server/src/main/resources/org/opensearch/bootstrap/test.policy index 9e1d5cebffc0e..f1a6e73fa5335 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test.policy @@ -25,9 +25,9 @@ grant { permission java.security.SecurityPermission "getProperty.jdk.tls.server.defaultDHEParameters"; permission java.security.SecurityPermission "getProperty.keystore.type.compat"; permission java.security.SecurityPermission "getProperty.org.bouncycastle.*"; - permission java.security.SecurityPermission "putProviderProperty.BC"; + permission java.security.SecurityPermission "putProviderProperty.BCFIPS"; permission java.security.SecurityPermission "removeProvider.SunJCE"; - permission java.security.SecurityPermission "removeProviderProperty.BC"; + permission java.security.SecurityPermission "removeProviderProperty.BCFIPS"; permission java.util.PropertyPermission "java.runtime.name", "read"; permission org.bouncycastle.crypto.CryptoServicesPermission "defaultRandomConfig"; permission org.bouncycastle.crypto.CryptoServicesPermission "exportPrivateKey"; diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 47addd36318a4..e5297ca0807a4 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -49,9 +49,9 @@ dependencies { api "org.mockito:mockito-core:${versions.mockito}" api "net.bytebuddy:byte-buddy:${versions.bytebuddy}" api "org.objenesis:objenesis:${versions.objenesis}" - api "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" - api "org.bouncycastle:bcpkix-jdk18on:${versions.bouncycastle}" - api "org.bouncycastle:bcutil-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bc-fips:${versions.bouncycastle_jce}" + api "org.bouncycastle:bcpkix-fips:${versions.bouncycastle_pkix}" + api "org.bouncycastle:bcutil-fips:${versions.bouncycastle_util}" annotationProcessor "org.apache.logging.log4j:log4j-core:${versions.log4j}" 
} diff --git a/test/framework/licenses/bc-fips-2.0.0.jar.sha1 b/test/framework/licenses/bc-fips-2.0.0.jar.sha1 new file mode 100644 index 0000000000000..79f0e3e9930bb --- /dev/null +++ b/test/framework/licenses/bc-fips-2.0.0.jar.sha1 @@ -0,0 +1 @@ +ee9ac432cf08f9a9ebee35d7cf8a45f94959a7ab \ No newline at end of file diff --git a/test/framework/licenses/bcpkix-fips-2.0.7.jar.sha1 b/test/framework/licenses/bcpkix-fips-2.0.7.jar.sha1 new file mode 100644 index 0000000000000..5df930b54fe44 --- /dev/null +++ b/test/framework/licenses/bcpkix-fips-2.0.7.jar.sha1 @@ -0,0 +1 @@ +01eea0f325315ca6295b0a6926ff862d8001cdf9 \ No newline at end of file diff --git a/test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 b/test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 deleted file mode 100644 index 385a9d930eede..0000000000000 --- a/test/framework/licenses/bcpkix-jdk18on-1.78.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd61bcdb87678451dd42d42e267979bd4b4451a1 \ No newline at end of file diff --git a/test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 b/test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 deleted file mode 100644 index 47fb5fd5e5f5d..0000000000000 --- a/test/framework/licenses/bcprov-jdk18on-1.78.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -619aafb92dc0b4c6cc4cf86c487ca48ee2d67a8e \ No newline at end of file diff --git a/test/framework/licenses/bcutil-fips-2.0.3.jar.sha1 b/test/framework/licenses/bcutil-fips-2.0.3.jar.sha1 new file mode 100644 index 0000000000000..d553536576656 --- /dev/null +++ b/test/framework/licenses/bcutil-fips-2.0.3.jar.sha1 @@ -0,0 +1 @@ +a1857cd639295b10cc90e6d31ecbc523cdafcc19 \ No newline at end of file From 6d53f9db3e2d420b8b799834172a6ab9ea5ffe9f Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Fri, 21 Mar 2025 08:36:31 -0700 Subject: [PATCH 101/550] Fix flaky test due to incorrect use of SSDV.nextOrd() (#17626) Signed-off-by: Rishabh Maurya --- .../index/fielddata/ordinals/MultiOrdinalsTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/opensearch/index/fielddata/ordinals/MultiOrdinalsTests.java b/server/src/test/java/org/opensearch/index/fielddata/ordinals/MultiOrdinalsTests.java index 64cb953a27cf3..8260c2dc417f5 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/ordinals/MultiOrdinalsTests.java +++ b/server/src/test/java/org/opensearch/index/fielddata/ordinals/MultiOrdinalsTests.java @@ -128,10 +128,10 @@ public int compare(OrdAndId o1, OrdAndId o2) { assertThat((long) singleOrds.ordValue(), equalTo(docOrds.get(0))); assertTrue(docs.advanceExact(docId)); + assertEquals(docOrds.size(), docs.docValueCount()); for (Long ord : docOrds) { assertThat(docs.nextOrd(), equalTo(ord)); } - assertEquals(SortedSetDocValues.NO_MORE_DOCS, docs.nextOrd()); } for (int i = docId + 1; i < ordAndId.id; i++) { assertFalse(singleOrds.advanceExact(i)); @@ -277,10 +277,10 @@ private void assertEquals(SortedSetDocValues docs, long[][] ordinalPlan) throws long[] ords = ordinalPlan[doc]; assertEquals(ords.length > 0, docs.advanceExact(doc)); if (ords.length > 0) { + assertEquals(ords.length, docs.docValueCount()); for (long ord : ords) { assertThat(docs.nextOrd(), equalTo(ord)); } - assertThat(docs.nextOrd(), equalTo((long) SortedSetDocValues.NO_MORE_DOCS)); } } } From 83edf7502fbab7eb477ccc8fb7714d840754a4d2 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Fri, 21 Mar 2025 10:17:32 -0700 Subject: [PATCH 102/550] Fix label in changelog entry (#17651) Signed-off-by: Andrew Ross --- CHANGELOG.md | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d4dbeafde903f..bbd8d2c427b29 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) ### Changed -- Migrate BC libs to their FIPS counterparts ([#3420](https://github.com/opensearch-project/OpenSearch/pull/14912)) +- Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) ### Dependencies - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607)) From 1acba95906877249aa7beed68b212483849d3fe9 Mon Sep 17 00:00:00 2001 From: Vinay Krishna Pudyodu Date: Fri, 21 Mar 2025 11:15:11 -0700 Subject: [PATCH 103/550] Search Replica Allocation and Recovery (#17457) * Restrict Search Replicas to Allocate only to Search dedicated node Signed-off-by: Vinay Krishna Pudyodu * fixed the javadoc Signed-off-by: Vinay Krishna Pudyodu * fixed tests Signed-off-by: Vinay Krishna Pudyodu * Treat Regular and Search Replicas Separately to Prevent Allocation Blocking Signed-off-by: Vinay Krishna Pudyodu * Updated tests and some refactor Signed-off-by: Vinay Krishna Pudyodu * Fixed SearchReplica recovery scenario for same node and new node Signed-off-by: Vinay Krishna Pudyodu * Updated the logic for SearchReplica recovery scenario for new node Signed-off-by: Vinay Krishna Pudyodu * Fixed nits after self review Signed-off-by: Vinay Krishna Pudyodu * Modified the search replica allocation based on node attribute Signed-off-by: Vinay Krishna Pudyodu * fixed PR comments Signed-off-by: Vinay Krishna Pudyodu * Revert "Fixed SearchReplica recovery scenario for same node and new node" This reverts commit de1e71905125dbc664f1d95227eff0bf17f72a7b. Signed-off-by: Vinay Krishna Pudyodu * Separated the recovery flow method for search replica Signed-off-by: Vinay Krishna Pudyodu * Revert "fixed PR comments" This reverts commit 8fe8dcfa1015e93454d1c20f999b6600ab53fc46. 
Signed-off-by: Vinay Krishna Pudyodu * Added unit tests in IndexShardTests Signed-off-by: Vinay Krishna Pudyodu * updated method name and minor refactor Signed-off-by: Vinay Krishna Pudyodu * Removed search replica recovery logic from internalRecoverFromStore method Signed-off-by: Vinay Krishna Pudyodu * Added integ test to cover search node restart scenario Signed-off-by: Vinay Krishna Pudyodu * Applied search node role in tests and removed searchonly attribute Signed-off-by: Vinay Krishna Pudyodu * Fixed failing test Signed-off-by: Vinay Krishna Pudyodu * Removed unwanted comment Signed-off-by: Vinay Krishna Pudyodu * Address PR comments Signed-off-by: Vinay Krishna Pudyodu --------- Signed-off-by: Vinay Krishna Pudyodu --- ...IT.java => SearchReplicaAllocationIT.java} | 83 +++--- ...SearchReplicaReplicationAndRecoveryIT.java | 81 +++--- .../replication/SearchReplicaRestoreIT.java | 19 +- .../SearchOnlyReplicaFeatureFlagIT.java | 12 - .../indices/settings/SearchOnlyReplicaIT.java | 108 +++++++- .../org/opensearch/cluster/ClusterModule.java | 2 +- .../allocator/LocalShardsBalancer.java | 11 + .../SearchReplicaAllocationDecider.java | 93 ++----- .../common/settings/ClusterSettings.java | 5 +- .../opensearch/index/shard/StoreRecovery.java | 100 ++++++- .../allocator/LocalShardsBalancerTests.java | 261 ++++++++++++++++++ .../decider/FilterAllocationDeciderTests.java | 2 +- .../SearchReplicaAllocationDeciderTests.java | 53 ++-- .../index/shard/IndexShardTests.java | 218 +++++++++++++++ .../cluster/OpenSearchAllocationTestCase.java | 12 + 15 files changed, 821 insertions(+), 239 deletions(-) rename server/src/internalClusterTest/java/org/opensearch/cluster/allocation/{SearchReplicaFilteringAllocationIT.java => SearchReplicaAllocationIT.java} (55%) create mode 100644 server/src/test/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancerTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAllocationIT.java similarity index 55% rename from server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java rename to server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAllocationIT.java index df2620b794686..0ede555098834 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAllocationIT.java @@ -21,31 +21,21 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class SearchReplicaFilteringAllocationIT extends RemoteStoreBaseIntegTestCase { +public class SearchReplicaAllocationIT extends RemoteStoreBaseIntegTestCase { @Override protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); } - public void testSearchReplicaDedicatedIncludes() { - List nodesIds = internalCluster().startNodes(3); - final String node_0 = nodesIds.get(0); - final String node_1 = 
nodesIds.get(1); - final String node_2 = nodesIds.get(2); - assertEquals(3, cluster().size()); + public void testSearchReplicaAllocatedToDedicatedSearchNode() { + internalCluster().startClusterManagerOnlyNode(); + String primaryNode = internalCluster().startDataOnlyNode(); + internalCluster().startSearchOnlyNode(); - client().admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings( - Settings.builder().put(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node_1 + "," + node_0) - ) - .execute() - .actionGet(); + assertEquals(3, cluster().size()); createIndex( "test", @@ -57,42 +47,16 @@ public void testSearchReplicaDedicatedIncludes() { .build() ); ensureGreen("test"); - // ensure primary is not on node 0 or 1, + // ensure primary is not on searchNode IndexShardRoutingTable routingTable = getRoutingTable(); - assertEquals(node_2, getNodeName(routingTable.primaryShard().currentNodeId())); - - String existingSearchReplicaNode = getNodeName(routingTable.searchOnlyReplicas().get(0).currentNodeId()); - String emptyAllowedNode = existingSearchReplicaNode.equals(node_0) ? node_1 : node_0; - - // set the included nodes to the other open node, search replica should relocate to that node. - client().admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", emptyAllowedNode)) - .execute() - .actionGet(); - ensureGreen("test"); - - routingTable = getRoutingTable(); - assertEquals(node_2, getNodeName(routingTable.primaryShard().currentNodeId())); - assertEquals(emptyAllowedNode, getNodeName(routingTable.searchOnlyReplicas().get(0).currentNodeId())); + assertEquals(primaryNode, getNodeName(routingTable.primaryShard().currentNodeId())); } public void testSearchReplicaDedicatedIncludes_DoNotAssignToOtherNodes() { - List nodesIds = internalCluster().startNodes(3); - final String node_0 = nodesIds.get(0); - final String node_1 = nodesIds.get(1); - final String node_2 = nodesIds.get(2); + internalCluster().startNodes(2); + final String node_1 = internalCluster().startSearchOnlyNode(); assertEquals(3, cluster().size()); - // set filter on 1 node and set search replica count to 2 - should leave 1 unassigned - client().admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node_1)) - .execute() - .actionGet(); - logger.info("--> creating an index with no replicas"); createIndex( "test", @@ -115,9 +79,32 @@ public void testSearchReplicaDedicatedIncludes_DoNotAssignToOtherNodes() { assertEquals(1, routingTable.searchOnlyReplicas().stream().filter(ShardRouting::unassigned).count()); } + public void testSearchReplicaDedicatedIncludes_WhenNotSetDoNotAssign() { + internalCluster().startNodes(2); + assertEquals(2, cluster().size()); + + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build() + ); + ensureYellowAndNoInitializingShards("test"); + IndexShardRoutingTable routingTable = getRoutingTable(); + assertNull(routingTable.searchOnlyReplicas().get(0).currentNodeId()); + + // Add a search node + final String searchNode = internalCluster().startSearchOnlyNode(); + + ensureGreen("test"); + assertEquals(searchNode, 
getNodeName(getRoutingTable().searchOnlyReplicas().get(0).currentNodeId())); + } + private IndexShardRoutingTable getRoutingTable() { - IndexShardRoutingTable routingTable = getClusterState().routingTable().index("test").getShards().get(0); - return routingTable; + return getClusterState().routingTable().index("test").getShards().get(0); } private String getNodeName(String id) { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java index 7d4dd62cdca61..a550f6cc6586c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java @@ -31,7 +31,6 @@ import org.junit.After; import java.nio.file.Path; -import java.util.List; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -39,7 +38,6 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.routing.RecoverySource.Type.EMPTY_STORE; import static org.opensearch.cluster.routing.RecoverySource.Type.EXISTING_STORE; -import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SearchReplicaReplicationAndRecoveryIT extends SegmentReplicationBaseIT { @@ -84,7 +82,8 @@ public void testReplication() throws Exception { final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startDataOnlyNode(); + final String searchNode = internalCluster().startSearchOnlyNode(); + ensureGreen(INDEX_NAME); final int docCount = 10; @@ -92,12 +91,13 @@ public void testReplication() throws Exception { client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); } refresh(INDEX_NAME); - waitForSearchableDocs(docCount, primary, replica); + waitForSearchableDocs(docCount, primary, searchNode); } public void testSegmentReplicationStatsResponseWithSearchReplica() throws Exception { internalCluster().startClusterManagerOnlyNode(); - final List nodes = internalCluster().startDataOnlyNodes(2); + final String searchNode = internalCluster().startSearchOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); createIndex( INDEX_NAME, Settings.builder() @@ -107,6 +107,7 @@ public void testSegmentReplicationStatsResponseWithSearchReplica() throws Except .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build() ); + ensureGreen(INDEX_NAME); final int docCount = 5; @@ -114,7 +115,7 @@ public void testSegmentReplicationStatsResponseWithSearchReplica() throws Except client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); } refresh(INDEX_NAME); - waitForSearchableDocs(docCount, nodes); + waitForSearchableDocs(docCount, primary, searchNode); SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() .indices() @@ -142,19 +143,11 @@ public void testSegmentReplicationStatsResponseWithSearchReplica() throws Except public void 
testSearchReplicaRecovery() throws Exception { internalCluster().startClusterManagerOnlyNode(); final String primary = internalCluster().startDataOnlyNode(); - final String replica = internalCluster().startDataOnlyNode(); - - // ensure search replicas are only allocated to "replica" node. - client().admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", replica)) - .execute() - .actionGet(); + final String searchNode = internalCluster().startSearchOnlyNode(); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); - assertRecoverySourceType(replica, EMPTY_STORE); + assertRecoverySourceType(searchNode, EMPTY_STORE); final int docCount = 10; for (int i = 0; i < docCount; i++) { @@ -162,26 +155,23 @@ public void testSearchReplicaRecovery() throws Exception { } refresh(INDEX_NAME); flush(INDEX_NAME); - waitForSearchableDocs(10, primary, replica); + waitForSearchableDocs(10, primary, searchNode); // Node stats should show remote download stats as nonzero, use this as a precondition to compare // post restart. - assertDownloadStats(replica, true); - NodesStatsResponse nodesStatsResponse; - NodeStats nodeStats; + assertDownloadStats(searchNode, true); - internalCluster().restartNode(replica); + internalCluster().restartNode(searchNode); ensureGreen(INDEX_NAME); - assertDocCounts(10, replica); + assertDocCounts(10, searchNode); - // assert existing store recovery - assertRecoverySourceType(replica, EXISTING_STORE); - assertDownloadStats(replica, false); + assertRecoverySourceType(searchNode, EXISTING_STORE); + assertDownloadStats(searchNode, false); } public void testRecoveryAfterDocsIndexed() throws Exception { internalCluster().startClusterManagerOnlyNode(); - final String primary = internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); final int docCount = 10; @@ -190,13 +180,14 @@ public void testRecoveryAfterDocsIndexed() throws Exception { } refresh(INDEX_NAME); - final String replica = internalCluster().startDataOnlyNode(); + final String searchNode = internalCluster().startSearchOnlyNode(); + ensureGreen(INDEX_NAME); - assertDocCounts(10, replica); + assertDocCounts(10, searchNode); - assertRecoverySourceType(replica, EMPTY_STORE); + assertRecoverySourceType(searchNode, EMPTY_STORE); // replica should have downloaded from remote - assertDownloadStats(replica, true); + assertDownloadStats(searchNode, true); client().admin() .indices() @@ -212,14 +203,14 @@ public void testRecoveryAfterDocsIndexed() throws Exception { .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)) .get(); ensureGreen(INDEX_NAME); - assertDocCounts(10, replica); + assertDocCounts(10, searchNode); - internalCluster().restartNode(replica); + internalCluster().restartNode(searchNode); ensureGreen(INDEX_NAME); - assertDocCounts(10, replica); - assertRecoverySourceType(replica, EXISTING_STORE); - assertDownloadStats(replica, false); + assertDocCounts(10, searchNode); + assertRecoverySourceType(searchNode, EXISTING_STORE); + assertDownloadStats(searchNode, false); } private static void assertRecoverySourceType(String replica, RecoverySource.Type recoveryType) throws InterruptedException, @@ -257,9 +248,10 @@ public void testStopPrimary_RestoreOnNewNode() throws Exception { refresh(INDEX_NAME); assertDocCounts(docCount, primary); - final String replica = internalCluster().startDataOnlyNode(); + final String 
searchNode = internalCluster().startSearchOnlyNode(); + ensureGreen(INDEX_NAME); - assertDocCounts(docCount, replica); + assertDocCounts(docCount, searchNode); // stop the primary internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); @@ -267,19 +259,19 @@ public void testStopPrimary_RestoreOnNewNode() throws Exception { ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(INDEX_NAME).get(); assertEquals(ClusterHealthStatus.RED, clusterHealthResponse.getStatus()); }); - assertDocCounts(docCount, replica); + assertDocCounts(docCount, searchNode); String restoredPrimary = internalCluster().startDataOnlyNode(); client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME), PlainActionFuture.newFuture()); ensureGreen(INDEX_NAME); - assertDocCounts(docCount, replica, restoredPrimary); + assertDocCounts(docCount, searchNode, restoredPrimary); for (int i = docCount; i < docCount * 2; i++) { client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); } refresh(INDEX_NAME); - assertBusy(() -> assertDocCounts(20, replica, restoredPrimary)); + assertBusy(() -> assertDocCounts(20, searchNode, restoredPrimary)); } public void testFailoverToNewPrimaryWithPollingReplication() throws Exception { @@ -293,9 +285,10 @@ public void testFailoverToNewPrimaryWithPollingReplication() throws Exception { } refresh(INDEX_NAME); - final String replica = internalCluster().startDataOnlyNode(); + final String searchNode = internalCluster().startSearchOnlyNode(); + ensureGreen(INDEX_NAME); - assertDocCounts(10, replica); + assertDocCounts(10, searchNode); client().admin() .indices() @@ -314,12 +307,12 @@ public void testFailoverToNewPrimaryWithPollingReplication() throws Exception { }); ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(INDEX_NAME).get(); assertEquals(ClusterHealthStatus.YELLOW, clusterHealthResponse.getStatus()); - assertDocCounts(10, replica); + assertDocCounts(10, searchNode); for (int i = docCount; i < docCount * 2; i++) { client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); } refresh(INDEX_NAME); - assertBusy(() -> assertDocCounts(20, replica, writer_replica)); + assertBusy(() -> assertDocCounts(20, searchNode, writer_replica)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java index e8d65e07c7dd9..af911162d4458 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java @@ -72,7 +72,9 @@ public void testSearchReplicaRestore_WhenSnapshotOnSegRep_RestoreOnSegRepWithSea Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1).build() ); ensureYellowAndNoInitializingShards(RESTORED_INDEX_NAME); - internalCluster().startDataOnlyNode(); + + internalCluster().startSearchOnlyNode(); + ensureGreen(RESTORED_INDEX_NAME); assertEquals(1, getNumberOfSearchReplicas(RESTORED_INDEX_NAME)); @@ -80,7 +82,7 @@ public void testSearchReplicaRestore_WhenSnapshotOnSegRep_RestoreOnSegRepWithSea assertHitCount(resp, DOC_COUNT); } - public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_RestoreOnDocRep() throws Exception { + public void 
testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_RestoreOnDocRep() { bootstrapIndexWithSearchReplicas(); createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); @@ -98,7 +100,7 @@ public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_Resto } private void bootstrapIndexWithOutSearchReplicas(ReplicationType replicationType) throws InterruptedException { - startCluster(2); + internalCluster().startNodes(2); Settings settings = Settings.builder() .put(super.indexSettings()) @@ -114,8 +116,9 @@ private void bootstrapIndexWithOutSearchReplicas(ReplicationType replicationType ensureGreen(INDEX_NAME); } - private void bootstrapIndexWithSearchReplicas() throws InterruptedException { - startCluster(3); + private void bootstrapIndexWithSearchReplicas() { + internalCluster().startNodes(2); + internalCluster().startSearchOnlyNode(); Settings settings = Settings.builder() .put(super.indexSettings()) @@ -126,6 +129,7 @@ private void bootstrapIndexWithSearchReplicas() throws InterruptedException { .build(); createIndex(INDEX_NAME, settings); + ensureGreen(INDEX_NAME); for (int i = 0; i < DOC_COUNT; i++) { client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); @@ -133,11 +137,6 @@ private void bootstrapIndexWithSearchReplicas() throws InterruptedException { flushAndRefresh(INDEX_NAME); } - private void startCluster(int numOfNodes) { - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNodes(numOfNodes); - } - private void createRepoAndSnapshot(String repositoryName, String repositoryType, String snapshotName, String indexName) { createRepository(repositoryName, repositoryType, randomRepoPath().toAbsolutePath()); createSnapshot(repositoryName, snapshotName, List.of(indexName)); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaFeatureFlagIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaFeatureFlagIT.java index ef18cff7e5b29..e5a05c04fa7ee 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaFeatureFlagIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaFeatureFlagIT.java @@ -17,7 +17,6 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 1) public class SearchOnlyReplicaFeatureFlagIT extends OpenSearchIntegTestCase { @@ -54,15 +53,4 @@ public void testUpdateFeatureFlagDisabled() { }); assertTrue(settingsException.getMessage().contains("unknown setting")); } - - public void testFilterAllocationSettingNotRegistered() { - expectThrows(SettingsException.class, () -> { - client().admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", "node")) - .execute() - .actionGet(); - }); - } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java index f524f4d1298c1..6e3cd8e724214 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java @@ -71,7 +71,9 @@ public void testFailoverWithSearchReplica_WithWriterReplicas() throws IOExceptio ); ensureYellow(TEST_INDEX); // add 2 nodes for the replicas - internalCluster().startDataOnlyNodes(2); + internalCluster().startDataOnlyNode(); + internalCluster().startSearchOnlyNode(); + ensureGreen(TEST_INDEX); // assert shards are on separate nodes & all active @@ -105,21 +107,68 @@ public void testFailoverWithSearchReplica_WithoutWriterReplicas() throws IOExcep ensureYellow(TEST_INDEX); client().prepareIndex(TEST_INDEX).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); // start a node for our search replica - String replica = internalCluster().startDataOnlyNode(); + final String searchNode = internalCluster().startSearchOnlyNode(); + ensureGreen(TEST_INDEX); assertActiveSearchShards(numSearchReplicas); - assertHitCount(client(replica).prepareSearch(TEST_INDEX).setSize(0).setPreference("_only_local").get(), 1); + assertHitCount(client(searchNode).prepareSearch(TEST_INDEX).setSize(0).setPreference("_only_local").get(), 1); // stop the primary and ensure search shard is not promoted: internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName)); ensureRed(TEST_INDEX); assertActiveSearchShards(numSearchReplicas); // while red our search shard is still searchable - assertHitCount(client(replica).prepareSearch(TEST_INDEX).setSize(0).setPreference("_only_local").get(), 1); + assertHitCount(client(searchNode).prepareSearch(TEST_INDEX).setSize(0).setPreference("_only_local").get(), 1); + } + + public void testFailoverWithSearchReplicaWhenNodeLeavesCluster() throws IOException { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + createIndex(TEST_INDEX); + indexSingleDoc(TEST_INDEX, true); + ensureYellow(TEST_INDEX); + // add another node for the search replica + String searchNode = internalCluster().startSearchOnlyNode(); + ensureGreen(TEST_INDEX); + + // Stop Node which hosts the search replica + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(searchNode)); + // Ensure search shard is unassigned + ensureYellowAndNoInitializingShards(TEST_INDEX); + assertActiveSearchShards(0); + + // Add a node and ensure search shard will get assigned + internalCluster().startSearchOnlyNode(); + + // Ensure search shard is recovered + ensureGreen(TEST_INDEX); + assertActiveSearchShards(1); + } + + public void testFailoverWithSearchReplicaWhenSearchNodeRestarts() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + createIndex(TEST_INDEX); + indexSingleDoc(TEST_INDEX, true); + ensureYellow(TEST_INDEX); + // add another node for the search replica + String searchNode = internalCluster().startSearchOnlyNode(); + ensureGreen(TEST_INDEX); + // Restart Search Node + internalCluster().restartNode(searchNode); + // Ensure search shard is unassigned + ensureYellowAndNoInitializingShards(TEST_INDEX); + assertActiveSearchShards(0); + // Ensure search shard is recovered + ensureGreen(TEST_INDEX); + assertActiveSearchShards(1); } public void testSearchReplicaScaling() { - internalCluster().startNodes(2); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startSearchOnlyNode(); + internalCluster().startDataOnlyNode(); + 
createIndex(TEST_INDEX); ensureGreen(TEST_INDEX); // assert settings @@ -130,8 +179,9 @@ public void testSearchReplicaScaling() { // assert cluster state & routing table assertActiveSearchShards(1); - // Add another node and search replica - internalCluster().startDataOnlyNode(); + // Add another search node and search replica + internalCluster().startSearchOnlyNode(); + client().admin() .indices() .prepareUpdateSettings(TEST_INDEX) @@ -155,7 +205,7 @@ public void testSearchReplicaRoutingPreference() throws IOException { int numSearchReplicas = 1; int numWriterReplicas = 1; internalCluster().startClusterManagerOnlyNode(); - String primaryNodeName = internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNode(); createIndex( TEST_INDEX, Settings.builder() @@ -167,7 +217,9 @@ public void testSearchReplicaRoutingPreference() throws IOException { ensureYellow(TEST_INDEX); client().prepareIndex(TEST_INDEX).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); // add 2 nodes for the replicas - internalCluster().startDataOnlyNodes(2); + internalCluster().startDataOnlyNode(); + internalCluster().startSearchOnlyNode(); + ensureGreen(TEST_INDEX); assertActiveShardCounts(numSearchReplicas, numWriterReplicas); @@ -184,6 +236,44 @@ public void testSearchReplicaRoutingPreference() throws IOException { assertEquals(nodeId, indexShardRoutingTable.searchOnlyReplicas().get(0).currentNodeId()); } + public void testUnableToAllocateSearchReplicaWontBlockRegularReplicaAllocation() { + int numSearchReplicas = 1; + int numWriterReplicas = 1; + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(3); + + createIndex( + TEST_INDEX, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numWriterReplicas) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, numSearchReplicas) + .build() + ); + + ensureYellowAndNoInitializingShards(TEST_INDEX); + assertActiveShardCounts(0, numWriterReplicas); + } + + public void testUnableToAllocateRegularReplicaWontBlockSearchReplicaAllocation() { + int numSearchReplicas = 1; + int numWriterReplicas = 1; + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + internalCluster().startSearchOnlyNode(); + + createIndex( + TEST_INDEX, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numWriterReplicas) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, numSearchReplicas) + .build() + ); + ensureYellowAndNoInitializingShards(TEST_INDEX); + assertActiveShardCounts(numSearchReplicas, 0); + } + /** * Helper to assert counts of active shards for each type. 
*/ diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index 9081432093106..939d9ec6b9ae8 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -392,7 +392,7 @@ public static Collection createAllocationDeciders( addAllocationDecider(deciders, new RestoreInProgressAllocationDecider()); addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings)); if (FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.get(settings)) { - addAllocationDecider(deciders, new SearchReplicaAllocationDecider(settings, clusterSettings)); + addAllocationDecider(deciders, new SearchReplicaAllocationDecider()); } addAllocationDecider(deciders, new SameShardAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings)); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index f6e3e94e9b9a6..89fa728438062 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -806,10 +806,21 @@ void allocateUnassigned() { final PriorityComparator secondaryComparator = PriorityComparator.getAllocationComparator(allocation); final Comparator comparator = (o1, o2) -> { if (o1.primary() ^ o2.primary()) { + // If one is primary and the other isn't, primary comes first return o1.primary() ? -1 : 1; } final int indexCmp; + if ((indexCmp = o1.getIndexName().compareTo(o2.getIndexName())) == 0) { + if (o1.isSearchOnly() ^ o2.isSearchOnly()) { + // Orders replicas first, followed by search replicas (e.g., R1, R1, S1, S1). + // This order is maintained because the logic that moves all replicas to unassigned + // when a replica cannot be allocated relies on this comparator. + // Ensures that a failed replica allocation does not block the allocation of a search replica. + return o1.isSearchOnly() ? 
1 : -1; + } + + // If both are primary or both are non-primary, compare by ID return o1.getId() - o2.getId(); } // this comparator is more expensive than all the others up there diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDecider.java index 955c396bee4da..f020a6622d147 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDecider.java @@ -9,91 +9,50 @@ package org.opensearch.cluster.routing.allocation.decider; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.node.DiscoveryNodeFilters; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.RoutingAllocation; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.settings.Settings; -import org.opensearch.node.remotestore.RemoteStoreNodeService; - -import java.util.Map; - -import static org.opensearch.cluster.node.DiscoveryNodeFilters.IP_VALIDATOR; -import static org.opensearch.cluster.node.DiscoveryNodeFilters.OpType.OR; /** - * This allocation decider is similar to FilterAllocationDecider but provides - * the option to filter specifically for search replicas. - * The filter behaves similar to an include for any defined node attribute. - * A search replica can be allocated to only nodes with one of the specified attributes while - * other shard types will be rejected from nodes with any othe attributes. + * A search replica can be allocated to only nodes with a search role + * other shard types will not be allocated to these nodes. 
* @opensearch.internal */ public class SearchReplicaAllocationDecider extends AllocationDecider { - public static final String NAME = "filter"; - private static final String SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_PREFIX = "cluster.routing.allocation.search.replica.dedicated.include"; - public static final Setting.AffixSetting SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING = Setting.prefixKeySetting( - SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_PREFIX + ".", - key -> Setting.simpleString(key, value -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.NodeScope) - ); - - private volatile DiscoveryNodeFilters searchReplicaIncludeFilters; - - private volatile RemoteStoreNodeService.Direction migrationDirection; - private volatile RemoteStoreNodeService.CompatibilityMode compatibilityMode; - - public SearchReplicaAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - setSearchReplicaIncludeFilters(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings)); - clusterSettings.addAffixMapUpdateConsumer( - SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING, - this::setSearchReplicaIncludeFilters, - (a, b) -> {} - ); - } + public static final String NAME = "search_replica_allocation"; @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - return shouldFilter(shardRouting, node.node(), allocation); + return canAllocate(shardRouting, node.node(), allocation); } @Override public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - return shouldFilter(shardRouting, node.node(), allocation); + return canAllocate(shardRouting, node.node(), allocation); } - private Decision shouldFilter(ShardRouting shardRouting, DiscoveryNode node, RoutingAllocation allocation) { - if (searchReplicaIncludeFilters != null) { - final boolean match = searchReplicaIncludeFilters.match(node); - if (match == false && shardRouting.isSearchOnly()) { - return allocation.decision( - Decision.NO, - NAME, - "node does not match shard setting [%s] filters [%s]", - SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_PREFIX, - searchReplicaIncludeFilters - ); - } - // filter will only apply to search replicas - if (shardRouting.isSearchOnly() == false && match) { - return allocation.decision( - Decision.NO, - NAME, - "only search replicas can be allocated to node with setting [%s] filters [%s]", - SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_PREFIX, - searchReplicaIncludeFilters - ); - } + private Decision canAllocate(ShardRouting shardRouting, DiscoveryNode node, RoutingAllocation allocation) { + boolean isSearchReplica = shardRouting.isSearchOnly(); + + if ((node.isSearchNode() && isSearchReplica) || (node.isSearchNode() == false && isSearchReplica == false)) { + return allocation.decision( + Decision.YES, + NAME, + "node and shard are compatible. node: [%s], is search node: [%s], shard: [%s]", + node.getId(), + node.isSearchNode(), + shardRouting.shortSummary() + ); + } else { + return allocation.decision( + Decision.NO, + NAME, + "node and shard are not compatible. 
node: [%s], is search node: [%s], shard: [%s]", + node.getId(), + node.isSearchNode(), + shardRouting.shortSummary() + ); } - return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters"); - } - - private void setSearchReplicaIncludeFilters(Map filters) { - searchReplicaIncludeFilters = DiscoveryNodeFilters.trimTier( - DiscoveryNodeFilters.buildOrUpdateFromKeyValue(searchReplicaIncludeFilters, OR, filters) - ); } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index d45d72e0817e0..456357005d0ab 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -75,7 +75,6 @@ import org.opensearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; -import org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.cluster.service.ClusterApplierService; @@ -841,8 +840,6 @@ public void apply(Settings value, Settings current, Settings previous) { TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING, TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING, TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING - ), - List.of(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL), - List.of(SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING) + ) ); } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 74d9cc4b4f6dd..8d572f95c384e 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -122,7 +122,11 @@ void recoverFromStore(final IndexShard indexShard, ActionListener liste if (canRecover(indexShard)) { ActionListener.completeWith(recoveryListener(indexShard, listener), () -> { logger.debug("starting recovery from store ..."); - internalRecoverFromStore(indexShard); + if (indexShard.shardRouting.isSearchOnly()) { + internalRecoverFromStoreForSearchReplica(indexShard); + } else { + internalRecoverFromStore(indexShard); + } return true; }); } else { @@ -747,17 +751,7 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe writeEmptyRetentionLeasesFile(indexShard); indexShard.recoveryState().getIndex().setFileDetailsComplete(); } - if (indexShard.routingEntry().isSearchOnly() == false) { - indexShard.openEngineAndRecoverFromTranslog(); - } else { - // Opens the engine for pull based replica copies that are - // not primary eligible. This will skip any checkpoint tracking and ensure - // that the shards are sync'd with remote store before opening. - // - // first bootstrap new history / translog so that the TranslogUUID matches the UUID from the latest commit. 
- bootstrapForSnapshot(indexShard, store); - indexShard.openEngineAndSkipTranslogRecoveryFromSnapshot(); - } + indexShard.openEngineAndRecoverFromTranslog(); if (indexShard.shouldSeedRemoteStore()) { indexShard.getThreadPool().executor(ThreadPool.Names.GENERIC).execute(() -> { logger.info("Attempting to seed Remote Store via local recovery for {}", indexShard.shardId()); @@ -776,6 +770,88 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe } } + private void internalRecoverFromStoreForSearchReplica(IndexShard indexShard) throws IndexShardRecoveryException { + indexShard.preRecovery(); + final RecoveryState recoveryState = indexShard.recoveryState(); + + assert recoveryState.getRecoverySource().getType().equals(RecoverySource.Type.EMPTY_STORE) + || recoveryState.getRecoverySource().getType().equals(RecoverySource.Type.EXISTING_STORE) + : "unsupported recovery source for search replica: " + recoveryState.getRecoverySource().getType(); + + indexShard.prepareForIndexRecovery(); + final Store store = indexShard.store(); + store.incRef(); + + try { + if (recoveryState.getRecoverySource().getType().equals(RecoverySource.Type.EXISTING_STORE)) { + SegmentInfos segmentInfos = readSegmentInfosFromStore(store); + if (segmentInfos != null) { + recoverLocalFiles(recoveryState, segmentInfos, store); + } else { + // During a node-left scenario where the search shard was unassigned + // and is now recovering on a new node. + // The node lacks SegmentsInfo for that shard, it falls back to Empty Store recovery. + recoverEmptyStore(indexShard, store); + } + } else { + recoverEmptyStore(indexShard, store); + } + completeRecovery(indexShard, store); + } catch (EngineException | IOException e) { + throw new IndexShardRecoveryException(shardId, "Failed to recover from gateway", e); + } finally { + store.decRef(); + } + } + + private SegmentInfos readSegmentInfosFromStore(Store store) throws IndexShardRecoveryException { + SegmentInfos segmentInfos = null; + try { + store.failIfCorrupted(); + try { + segmentInfos = store.readLastCommittedSegmentsInfo(); + } catch (Exception ignored) { + // Ignore the exception + logger.error("Failed to readLastCommittedSegmentsInfo"); + } + } catch (Exception e) { + throw new IndexShardRecoveryException(shardId, "failed to fetch index version", e); + } + return segmentInfos; + } + + private void completeRecovery(IndexShard indexShard, Store store) throws IOException { + // Opens the engine for pull-based replica copies that are + // not primary eligible. + // This will skip any checkpoint tracking and ensure that the shards are sync with remote store before opening. + // First bootstrap new history / translog so that the TranslogUUID matches the UUID from the latest commit. 
+ bootstrapForSnapshot(indexShard, store); + indexShard.openEngineAndSkipTranslogRecoveryFromSnapshot(); + + indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); + indexShard.finalizeRecovery(); + indexShard.postRecovery("Post recovery from shard_store"); + } + + private void recoverEmptyStore(IndexShard indexShard, Store store) throws IOException { + store.createEmpty(indexShard.indexSettings().getIndexVersionCreated().luceneVersion); + final String translogUUID = Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), + SequenceNumbers.NO_OPS_PERFORMED, + shardId, + indexShard.getPendingPrimaryTerm() + ); + store.associateIndexWithNewTranslog(translogUUID); + writeEmptyRetentionLeasesFile(indexShard); + indexShard.recoveryState().getIndex().setFileDetailsComplete(); + } + + private void recoverLocalFiles(RecoveryState recoveryState, SegmentInfos segmentInfos, Store store) throws IOException { + final ReplicationLuceneIndex index = recoveryState.getIndex(); + addRecoveredFileDetails(segmentInfos, store, index); + index.setFileDetailsComplete(); + } + private static void writeEmptyRetentionLeasesFile(IndexShard indexShard) throws IOException { assert indexShard.getRetentionLeases().leases().isEmpty() : indexShard.getRetentionLeases(); // not loaded yet indexShard.persistRetentionLeases(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancerTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancerTests.java new file mode 100644 index 0000000000000..14088efae8cbe --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancerTests.java @@ -0,0 +1,261 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.routing.allocation.allocator; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterInfo; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class LocalShardsBalancerTests extends OpenSearchAllocationTestCase { + + private final DiscoveryNode node1 = newNode("node1", "node1", Collections.singletonMap("zone", "1a")); + private final DiscoveryNode node2 = newNode("node2", "node2", Collections.singletonMap("zone", "1b")); + private final DiscoveryNode node3 = newNode("node3", "node3", Collections.singletonMap("zone", "1c")); + private final DiscoveryNode node4 = newNode("node4", "node4", Collections.singletonMap("zone", "1a")); + private final DiscoveryNode node5 = newNode("node5", "node5", Collections.singletonMap("zone", "1b")); + private final DiscoveryNode node6 = newNode("node6", "node6", Collections.singletonMap("zone", "1c")); + + public void testAllocateUnassignedWhenAllShardsCanBeAllocated() { + int numberOfIndices = 2; + int numberOfShards = 1; + int numberOfReplicas = 2; + int numberOfSearchReplicas = 3; + + Metadata metadata = buildMetadata(Metadata.builder(), numberOfIndices, numberOfShards, numberOfReplicas, numberOfSearchReplicas); + RoutingTable routingTable = buildRoutingTable(metadata); + ClusterState state = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).add(node4).add(node5).add(node6)) + .build(); + + RoutingAllocation allocation = new RoutingAllocation( + yesAllocationDeciders(), + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + null, + System.nanoTime() + ); + + BalancedShardsAllocator.WeightFunction weightFunction = mock(BalancedShardsAllocator.WeightFunction.class); + when(weightFunction.weightWithAllocationConstraints(any(), any(), any())).thenReturn(0.5F); + + final ShardsBalancer localShardsBalancer = new LocalShardsBalancer( + logger, + allocation, + null, + weightFunction, + 0, + false, + false, + false, + null + ); + + localShardsBalancer.allocateUnassigned(); + + List initializingShards = 
allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING); + assertEquals(12, initializingShards.size()); + + List unassignedShards = allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED); + assertEquals(0, unassignedShards.size()); + } + + public void testAllocateUnassignedWhenSearchShardsCannotBeAllocated() { + int numberOfIndices = 2; + int numberOfShards = 1; + int numberOfReplicas = 2; + int numberOfSearchReplicas = 3; + + Metadata metadata = buildMetadata(Metadata.builder(), numberOfIndices, numberOfShards, numberOfReplicas, numberOfSearchReplicas); + RoutingTable routingTable = buildRoutingTable(metadata); + ClusterState state = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).add(node4).add(node5).add(node6)) + .build(); + + RoutingAllocation allocation = new RoutingAllocation( + provideAllocationDecidersWithNoDecisionForSearchReplica(), + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + null, + System.nanoTime() + ); + + BalancedShardsAllocator.WeightFunction weightFunction = mock(BalancedShardsAllocator.WeightFunction.class); + when(weightFunction.weightWithAllocationConstraints(any(), any(), any())).thenReturn(0.5F); + + final ShardsBalancer localShardsBalancer = new LocalShardsBalancer( + logger, + allocation, + null, + weightFunction, + 0, + false, + false, + false, + null + ); + + localShardsBalancer.allocateUnassigned(); + + List initializingShards = allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING); + assertEquals(6, initializingShards.size()); + + List unassignedShards = allocation.routingNodes().unassigned().ignored(); + assertEquals(6, unassignedShards.size()); + } + + public void testAllocateUnassignedWhenRegularReplicaShardsCannotBeAllocated() { + int numberOfIndices = 2; + int numberOfShards = 1; + int numberOfReplicas = 2; + int numberOfSearchReplicas = 3; + + Metadata metadata = buildMetadata(Metadata.builder(), numberOfIndices, numberOfShards, numberOfReplicas, numberOfSearchReplicas); + RoutingTable routingTable = buildRoutingTable(metadata); + ClusterState state = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).add(node4).add(node5).add(node6)) + .build(); + + RoutingAllocation allocation = new RoutingAllocation( + provideAllocationDecidersWithNoDecisionForRegularReplica(), + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + null, + System.nanoTime() + ); + + BalancedShardsAllocator.WeightFunction weightFunction = mock(BalancedShardsAllocator.WeightFunction.class); + when(weightFunction.weightWithAllocationConstraints(any(), any(), any())).thenReturn(0.5F); + + final ShardsBalancer localShardsBalancer = new LocalShardsBalancer( + logger, + allocation, + null, + weightFunction, + 0, + false, + false, + false, + null + ); + + localShardsBalancer.allocateUnassigned(); + + List initializingShards = allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING); + assertEquals(8, initializingShards.size()); + + List unassignedShards = allocation.routingNodes().unassigned().ignored(); + assertEquals(4, unassignedShards.size()); + } + + private RoutingTable buildRoutingTable(Metadata metadata) { + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + for 
(Map.Entry entry : metadata.getIndices().entrySet()) { + routingTableBuilder.addAsNew(entry.getValue()); + } + return routingTableBuilder.build(); + } + + private Metadata buildMetadata( + Metadata.Builder mb, + int numberOfIndices, + int numberOfShards, + int numberOfReplicas, + int numberOfSearchReplicas + ) { + for (int i = 0; i < numberOfIndices; i++) { + mb.put( + IndexMetadata.builder("test_" + i) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + .numberOfSearchReplicas(numberOfSearchReplicas) + ); + } + + return mb.build(); + } + + private AllocationDeciders provideAllocationDecidersWithNoDecisionForSearchReplica() { + return new AllocationDeciders(Arrays.asList(new TestAllocateDecision((shardRouting -> { + if (shardRouting.isSearchOnly()) { + return Decision.NO; + } else { + return Decision.YES; + } + })), new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) + ); + } + + private AllocationDeciders provideAllocationDecidersWithNoDecisionForRegularReplica() { + return new AllocationDeciders(Arrays.asList(new TestAllocateDecision((shardRouting -> { + if (!shardRouting.isSearchOnly() && !shardRouting.primary()) { + return Decision.NO; + } else { + return Decision.YES; + } + })), new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) + ); + } + + public static class TestAllocateDecision extends AllocationDecider { + + private final Function decider; + + public TestAllocateDecision(Function decider) { + this.decider = decider; + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return decider.apply(shardRouting); + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { + return decider.apply(shardRouting); + } + } +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index f226c45553d57..df8d35a13b50a 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -355,7 +355,7 @@ static ClusterState createInitialClusterState(AllocationService service, Setting .routingTable(routingTable) .build(); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newSearchNode("node2"))) .build(); return service.reroute(clusterState, "reroute"); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java index 9604e82fe4c88..584d22794024c 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java @@ -42,7 +42,6 @@ import static org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING; import static 
org.opensearch.cluster.routing.ShardRoutingState.STARTED; -import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -50,15 +49,9 @@ public class SearchReplicaAllocationDeciderTests extends OpenSearchAllocationTestCase { public void testSearchReplicaRoutingDedicatedIncludes() { - // we aren't using a settingsModule here so we need to set feature flag gated setting Set> settings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - settings.add(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING); ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), settings); - Settings initialSettings = Settings.builder() - .put("cluster.routing.allocation.search.replica.dedicated.include._id", "node1,node2") - .build(); - - SearchReplicaAllocationDecider filterAllocationDecider = new SearchReplicaAllocationDecider(initialSettings, clusterSettings); + SearchReplicaAllocationDecider filterAllocationDecider = new SearchReplicaAllocationDecider(); AllocationDeciders allocationDeciders = new AllocationDeciders( Arrays.asList( filterAllocationDecider, @@ -102,59 +95,59 @@ public void testSearchReplicaRoutingDedicatedIncludes() { new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "") ); + // Tests for canAllocate + // Can allocate searchReplica on search node Decision.Single decision = (Decision.Single) filterAllocationDecider.canAllocate( searchReplica, state.getRoutingNodes().node("node2"), allocation ); assertEquals(decision.toString(), Decision.Type.YES, decision.type()); + + // Cannot allocate searchReplica on data node decision = (Decision.Single) filterAllocationDecider.canAllocate(searchReplica, state.getRoutingNodes().node("node1"), allocation); - assertEquals(decision.toString(), Decision.Type.YES, decision.type()); + assertEquals(decision.toString(), Decision.Type.NO, decision.type()); + // Cannot allocate regularReplica on search node decision = (Decision.Single) filterAllocationDecider.canAllocate(regularReplica, state.getRoutingNodes().node("node2"), allocation); assertEquals(decision.toString(), Decision.Type.NO, decision.type()); + + // Can allocate regularReplica on data node decision = (Decision.Single) filterAllocationDecider.canAllocate(regularReplica, state.getRoutingNodes().node("node1"), allocation); - assertEquals(decision.toString(), Decision.Type.NO, decision.type()); + assertEquals(decision.toString(), Decision.Type.YES, decision.type()); + // Can allocate primary on data node decision = (Decision.Single) filterAllocationDecider.canAllocate(primary, state.getRoutingNodes().node("node1"), allocation); - assertEquals(decision.toString(), Decision.Type.NO, decision.type()); + assertEquals(decision.toString(), Decision.Type.YES, decision.type()); + + // Cannot allocate primary on search node decision = (Decision.Single) filterAllocationDecider.canAllocate(primary, state.getRoutingNodes().node("node2"), allocation); assertEquals(decision.toString(), Decision.Type.NO, decision.type()); - Settings updatedSettings = Settings.builder() - 
.put("cluster.routing.allocation.search.replica.dedicated.include._id", "node2") - .build(); - clusterSettings.applySettings(updatedSettings); - - decision = (Decision.Single) filterAllocationDecider.canAllocate(searchReplica, state.getRoutingNodes().node("node2"), allocation); + // Tests for canRemain + decision = (Decision.Single) filterAllocationDecider.canRemain(searchReplica, state.getRoutingNodes().node("node2"), allocation); assertEquals(decision.toString(), Decision.Type.YES, decision.type()); - decision = (Decision.Single) filterAllocationDecider.canAllocate(searchReplica, state.getRoutingNodes().node("node1"), allocation); - assertEquals(decision.toString(), Decision.Type.NO, decision.type()); + decision = (Decision.Single) filterAllocationDecider.canRemain(searchReplica, state.getRoutingNodes().node("node1"), allocation); assertEquals(decision.toString(), Decision.Type.NO, decision.type()); - decision = (Decision.Single) filterAllocationDecider.canAllocate(regularReplica, state.getRoutingNodes().node("node2"), allocation); + decision = (Decision.Single) filterAllocationDecider.canRemain(regularReplica, state.getRoutingNodes().node("node2"), allocation); assertEquals(decision.toString(), Decision.Type.NO, decision.type()); - decision = (Decision.Single) filterAllocationDecider.canAllocate(regularReplica, state.getRoutingNodes().node("node1"), allocation); - assertEquals(decision.toString(), Decision.Type.YES, decision.type()); + decision = (Decision.Single) filterAllocationDecider.canRemain(regularReplica, state.getRoutingNodes().node("node1"), allocation); assertEquals(decision.toString(), Decision.Type.YES, decision.type()); - decision = (Decision.Single) filterAllocationDecider.canAllocate(primary, state.getRoutingNodes().node("node1"), allocation); - assertEquals(decision.toString(), Decision.Type.YES, decision.type()); - decision = (Decision.Single) filterAllocationDecider.canAllocate(primary, state.getRoutingNodes().node("node2"), allocation); - assertEquals(decision.toString(), Decision.Type.NO, decision.type()); decision = (Decision.Single) filterAllocationDecider.canRemain(primary, state.getRoutingNodes().node("node1"), allocation); assertEquals(decision.toString(), Decision.Type.YES, decision.type()); + + decision = (Decision.Single) filterAllocationDecider.canRemain(primary, state.getRoutingNodes().node("node2"), allocation); + assertEquals(decision.toString(), Decision.Type.NO, decision.type()); } public void testSearchReplicaWithThrottlingDecider_PrimaryBasedReplication() { TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator(); // throttle outgoing on primary AllocationService strategy = createAllocationService(Settings.EMPTY, gatewayAllocator); - - Set> settings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - settings.add(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING); Metadata metadata = Metadata.builder() .put( IndexMetadata.builder("test") @@ -208,8 +201,6 @@ public void testSearchReplicaWithThrottlingDecider_PrimaryBasedReplication() { public void testSearchReplicaWithThrottlingDeciderWithoutPrimary_RemoteStoreEnabled() { TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator(); AllocationService strategy = createAllocationService(Settings.EMPTY, gatewayAllocator); - Set> settings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - settings.add(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING); Metadata metadata = Metadata.builder() .put( IndexMetadata.builder("test") diff --git 
a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 9fc779891b810..c93c997215a55 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -3059,6 +3059,224 @@ public void testRestoreSearchOnlyShardFromStore() throws IOException { closeShards(primary, replica); } + public void testRestoreSearchOnlyShardFromStoreOnNewNode() throws IOException { + // this test indexes docs on a primary, refreshes, + // then recovers a new Search Replica and asserts all docs are present + String remoteStorePath = createTempDir().toString(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStorePath + "__test") + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStorePath + "__test") + .build(); + IndexShard primary = newStartedShard(true, settings, new InternalEngineFactory()); + indexDoc(primary, "_doc", "1"); + indexDoc(primary, "_doc", "2"); + primary.refresh("test"); + assertDocs(primary, "1", "2"); + + // Setting the RecoverySource to ExistingStoreRecoverySource to simulate a shard initializing on a new node + // during a node-left scenario. The shard attempts recovery using ExistingStoreRecoverySource, + // but since no segment info is found, it falls back to Empty Store recovery logic. + ShardRouting searchReplicaShardRouting = TestShardRouting.newShardRouting( + primary.shardId, + randomAlphaOfLength(10), + false, + true, + ShardRoutingState.INITIALIZING, + RecoverySource.ExistingStoreRecoverySource.INSTANCE + ); + IndexShard replica = newShard(searchReplicaShardRouting, settings, new NRTReplicationEngineFactory()); + + recoverShardFromStore(replica); + assertDocs(replica, "1", "2"); + assertEquals( + primary.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + closeShards(primary, replica); + } + + public void testSearchShardDoesNotStartIfCorruptedMarkerIsPresent() throws Exception { + // this test indexes docs on a primary, refreshes, then recovers a new Search Replica and asserts + // all docs are present + String remoteStorePath = createTempDir().toString(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStorePath + "__test") + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStorePath + "__test") + .build(); + IndexShard primary = newStartedShard(true, settings, new InternalEngineFactory()); + indexDoc(primary, "_doc", "1"); + indexDoc(primary, "_doc", "2"); + primary.refresh("test"); + assertDocs(primary, "1", "2"); + + // start search replica + ShardRouting searchReplicaShardRouting = TestShardRouting.newShardRouting( + primary.shardId, + randomAlphaOfLength(10), + false, + true, + ShardRoutingState.INITIALIZING, + RecoverySource.EmptyStoreRecoverySource.INSTANCE + ); + IndexShard replica = newShard(searchReplicaShardRouting, settings, new NRTReplicationEngineFactory()); + recoverShardFromStore(replica); + assertDocs(replica, "1", "2"); + assertEquals( + 
primary.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + closeShards(replica); + + final ShardPath searchReplicaShardPath = replica.shardPath(); + final IndexMetadata indexMetadata = replica.indexSettings().getIndexMetadata(); + final Path indexPath = searchReplicaShardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + + // create corrupted marker + final String corruptionMessage = "fake ioexception"; + try (Store store = createStore(replica.indexSettings(), searchReplicaShardPath)) { + store.markStoreCorrupted(new IOException(corruptionMessage)); + } + + ShardRouting shardRouting = TestShardRouting.newShardRouting( + primary.shardId, + randomAlphaOfLength(10), + false, + true, + ShardRoutingState.INITIALIZING, + RecoverySource.ExistingStoreRecoverySource.INSTANCE + ); + + // try to start shard on corrupted files + IndexShard replicaCorrupted = newShard( + shardRouting, + searchReplicaShardPath, + indexMetadata, + null, + null, + replica.engineFactory, + replica.engineConfigFactory, + replica.getGlobalCheckpointSyncer(), + replica.getRetentionLeaseSyncer(), + EMPTY_EVENT_LISTENER, + null + ); + + final IndexShardRecoveryException exception1 = expectThrows( + IndexShardRecoveryException.class, + () -> newStartedShard(p -> replicaCorrupted, true) + ); + + assertThat(exception1.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(replicaCorrupted); + closeShards(primary); + + final AtomicInteger corruptedMarkerCount = new AtomicInteger(); + final SimpleFileVisitor corruptedVisitor = new SimpleFileVisitor<>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { + if (Files.isRegularFile(file) && file.getFileName().toString().startsWith(Store.CORRUPTED_MARKER_NAME_PREFIX)) { + corruptedMarkerCount.incrementAndGet(); + } + return FileVisitResult.CONTINUE; + } + }; + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); + } + + public void testShardDoesNotStartIfCorruptedMarkerIsPresentSearch() throws Exception { + final IndexShard indexShard = newStartedShard(true); + + final long numDocs = between(10, 100); + for (long i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Long.toString(i), "{}"); + } + indexShard.flush(new FlushRequest()); + closeShards(indexShard); + + final ShardPath shardPath = indexShard.shardPath(); + + final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId( + indexShard.routingEntry(), + RecoverySource.ExistingStoreRecoverySource.INSTANCE + ); + final IndexMetadata indexMetadata = indexShard.indexSettings().getIndexMetadata(); + + final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + + // create corrupted marker + final String corruptionMessage = "fake ioexception"; + try (Store store = createStore(indexShard.indexSettings(), shardPath)) { + store.markStoreCorrupted(new IOException(corruptionMessage)); + } + + // try to start shard on corrupted files + final IndexShard corruptedShard = newShard( + shardRouting, + shardPath, + indexMetadata, + null, + null, + indexShard.engineFactory, + indexShard.engineConfigFactory, + indexShard.getGlobalCheckpointSyncer(), + indexShard.getRetentionLeaseSyncer(), + EMPTY_EVENT_LISTENER, + null + ); + + final IndexShardRecoveryException exception1 = expectThrows( + IndexShardRecoveryException.class, + () -> 
newStartedShard(p -> corruptedShard, true) + ); + assertThat(exception1.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(corruptedShard); + + final AtomicInteger corruptedMarkerCount = new AtomicInteger(); + final SimpleFileVisitor corruptedVisitor = new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().startsWith(Store.CORRUPTED_MARKER_NAME_PREFIX)) { + corruptedMarkerCount.incrementAndGet(); + } + return FileVisitResult.CONTINUE; + } + }; + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); + + // try to start another time shard on corrupted files + final IndexShard corruptedShard2 = newShard( + shardRouting, + shardPath, + indexMetadata, + null, + null, + indexShard.engineFactory, + indexShard.engineConfigFactory, + indexShard.getGlobalCheckpointSyncer(), + indexShard.getRetentionLeaseSyncer(), + EMPTY_EVENT_LISTENER, + null + ); + + final IndexShardRecoveryException exception2 = expectThrows( + IndexShardRecoveryException.class, + () -> newStartedShard(p -> corruptedShard2, true) + ); + assertThat(exception2.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(corruptedShard2); + + // check that corrupt marker is there + corruptedMarkerCount.set(0); + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store still has a single corrupt marker", corruptedMarkerCount.get(), equalTo(1)); + } + public void testReaderWrapperIsUsed() throws IOException { IndexShard shard = newStartedShard(true); indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java index bc42993ac2096..102a0e5aa2e6d 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java @@ -154,10 +154,22 @@ public static AllocationDeciders randomAllocationDeciders(Settings settings, Clu new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE)) ); + protected static Set SEARCH_ROLE = Collections.unmodifiableSet( + new HashSet<>(Arrays.asList(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.SEARCH_ROLE)) + ); + protected static DiscoveryNode newNode(String nodeId) { return newNode(nodeId, Version.CURRENT); } + protected static DiscoveryNode newSearchNode(String nodeId) { + return newSearchNode(nodeId, Version.CURRENT); + } + + protected static DiscoveryNode newSearchNode(String nodeId, Version version) { + return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), SEARCH_ROLE, version); + } + protected static DiscoveryNode newNode(String nodeName, String nodeId, Map attributes) { return new DiscoveryNode(nodeName, nodeId, buildNewFakeTransportAddress(), attributes, CLUSTER_MANAGER_DATA_ROLES, Version.CURRENT); } From c4723142f5abebe769f4e58350fd20b8c0f4e96d Mon Sep 17 00:00:00 2001 From: kkewwei Date: Tue, 25 Mar 2025 01:10:16 +0800 Subject: [PATCH 104/550] Fix Flaky Test SpecificClusterManagerNodesIT.testElectOnlyBetweenClusterManagerNodes (#17640) Signed-off-by: kkewwei Signed-off-by: kkewwei --- 
.../org/opensearch/cluster/SpecificClusterManagerNodesIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java index 8e3426b9cae26..20c5088071766 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java @@ -267,7 +267,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .nodes() .getClusterManagerNode() .getName(); - } catch (ClusterManagerNotDiscoveredException e) { + } catch (Exception e) { logger.debug("failed to get cluster-manager name", e); return null; } From 5c59f301a35f732155678130bc4e8ef03039a264 Mon Sep 17 00:00:00 2001 From: Yupeng Fu Date: Mon, 24 Mar 2025 12:02:51 -0700 Subject: [PATCH 105/550] [Pull-based Ingestion] Add Kinesis plugin support (#17615) --------- Signed-off-by: Yupeng Fu --- CHANGELOG.md | 1 + .../plugin/kafka/KafkaPartitionConsumer.java | 54 +- .../kafka/KafkaPartitionConsumerTests.java | 3 +- plugins/ingestion-kinesis/build.gradle | 251 ++++++++ .../licenses/annotations-2.30.31.jar.sha1 | 1 + .../licenses/annotations-LICENSE.txt | 202 +++++++ .../licenses/annotations-NOTICE.txt | 0 .../licenses/apache-client-2.30.31.jar.sha1 | 1 + .../licenses/apache-client-LICENSE.txt | 202 +++++++ .../licenses/apache-client-NOTICE.txt | 14 + .../licenses/auth-2.30.31.jar.sha1 | 1 + .../licenses/auth-LICENSE.txt | 202 +++++++ .../licenses/auth-NOTICE.txt | 0 .../aws-cbor-protocol-2.30.31.jar.sha1 | 1 + .../licenses/aws-cbor-protocol-LICENSE.txt | 202 +++++++ .../licenses/aws-cbor-protocol-NOTICE.txt | 0 .../licenses/aws-core-2.30.31.jar.sha1 | 1 + .../licenses/aws-core-LICENSE.txt | 202 +++++++ .../licenses/aws-core-NOTICE.txt | 0 .../licenses/aws-crt-0.35.0.jar.sha1 | 1 + .../licenses/aws-crt-LICENSE.txt | 202 +++++++ .../licenses/aws-crt-NOTICE.txt | 0 .../aws-json-protocol-2.30.31.jar.sha1 | 1 + .../licenses/aws-json-protocol-LICENSE.txt | 202 +++++++ .../licenses/aws-json-protocol-NOTICE.txt | 0 .../aws-query-protocol-2.30.31.jar.sha1 | 1 + .../licenses/aws-query-protocol-LICENSE.txt | 202 +++++++ .../licenses/aws-query-protocol-NOTICE.txt | 0 .../aws-xml-protocol-2.30.31.jar.sha1 | 1 + .../licenses/aws-xml-protocol-LICENSE.txt | 202 +++++++ .../licenses/aws-xml-protocol-NOTICE.txt | 0 .../licenses/checksums-2.30.31.jar.sha1 | 1 + .../licenses/checksums-LICENSE.txt | 202 +++++++ .../licenses/checksums-NOTICE.txt | 0 .../licenses/checksums-spi-2.30.31.jar.sha1 | 1 + .../licenses/checksums-spi-LICENSE.txt | 202 +++++++ .../licenses/checksums-spi-NOTICE.txt | 0 .../licenses/commons-codec-1.16.1.jar.sha1 | 1 + .../licenses/commons-codec-LICENSE.txt | 202 +++++++ .../licenses/commons-codec-NOTICE.txt | 17 + .../licenses/commons-logging-1.2.jar.sha1 | 1 + .../licenses/commons-logging-LICENSE.txt | 202 +++++++ .../licenses/commons-logging-NOTICE.txt | 5 + .../licenses/endpoints-spi-2.30.31.jar.sha1 | 1 + .../licenses/endpoints-spi-LICENSE.txt | 202 +++++++ .../licenses/endpoints-spi-NOTICE.txt | 0 .../licenses/http-auth-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-LICENSE.txt | 202 +++++++ .../licenses/http-auth-NOTICE.txt | 0 .../licenses/http-auth-aws-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-aws-LICENSE.txt | 202 +++++++ .../licenses/http-auth-aws-NOTICE.txt | 0 
.../licenses/http-auth-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-auth-spi-LICENSE.txt | 202 +++++++ .../licenses/http-auth-spi-NOTICE.txt | 0 .../licenses/http-client-spi-2.30.31.jar.sha1 | 1 + .../licenses/http-client-spi-LICENSE.txt | 202 +++++++ .../licenses/http-client-spi-NOTICE.txt | 0 .../licenses/httpclient-4.5.14.jar.sha1 | 1 + .../licenses/httpclient-LICENSE.txt | 558 ++++++++++++++++++ .../licenses/httpclient-NOTICE.txt | 5 + .../licenses/httpcore-4.4.16.jar.sha1 | 1 + .../licenses/httpcore-LICENSE.txt | 241 ++++++++ .../licenses/httpcore-NOTICE.txt | 8 + .../licenses/identity-spi-2.30.31.jar.sha1 | 1 + .../licenses/identity-spi-LICENSE.txt | 202 +++++++ .../licenses/identity-spi-NOTICE.txt | 0 .../licenses/jackson-LICENSE | 8 + .../ingestion-kinesis/licenses/jackson-NOTICE | 20 + .../jackson-annotations-2.18.2.jar.sha1 | 1 + .../licenses/jackson-databind-2.18.2.jar.sha1 | 1 + .../licenses/json-utils-2.30.31.jar.sha1 | 1 + .../licenses/json-utils-LICENSE.txt | 202 +++++++ .../licenses/json-utils-NOTICE.txt | 0 .../licenses/kinesis-2.30.31.jar.sha1 | 1 + .../licenses/kinesis-LICENSE.txt | 202 +++++++ .../licenses/kinesis-NOTICE.txt | 0 .../licenses/metrics-spi-2.30.31.jar.sha1 | 1 + .../licenses/metrics-spi-LICENSE.txt | 202 +++++++ .../licenses/metrics-spi-NOTICE.txt | 0 .../licenses/netty-LICENSE.txt | 202 +++++++ .../licenses/netty-NOTICE.txt | 116 ++++ .../netty-buffer-4.1.118.Final.jar.sha1 | 1 + .../netty-codec-4.1.118.Final.jar.sha1 | 1 + .../netty-codec-http-4.1.118.Final.jar.sha1 | 1 + .../netty-codec-http2-4.1.118.Final.jar.sha1 | 1 + .../netty-common-4.1.118.Final.jar.sha1 | 1 + .../netty-handler-4.1.118.Final.jar.sha1 | 1 + .../netty-nio-client-2.30.31.jar.sha1 | 1 + .../netty-resolver-4.1.118.Final.jar.sha1 | 1 + .../netty-transport-4.1.118.Final.jar.sha1 | 1 + ...sport-classes-epoll-4.1.118.Final.jar.sha1 | 1 + ...-native-unix-common-4.1.118.Final.jar.sha1 | 1 + .../licenses/profiles-2.30.31.jar.sha1 | 1 + .../licenses/profiles-LICENSE.txt | 202 +++++++ .../licenses/profiles-NOTICE.txt | 0 .../licenses/protocol-core-2.30.31.jar.sha1 | 1 + .../licenses/protocol-core-LICENSE.txt | 202 +++++++ .../licenses/protocol-core-NOTICE.txt | 0 .../licenses/regions-2.30.31.jar.sha1 | 1 + .../licenses/regions-LICENSE.txt | 202 +++++++ .../licenses/regions-NOTICE.txt | 0 .../licenses/retries-2.30.31.jar.sha1 | 1 + .../licenses/retries-LICENSE.txt | 202 +++++++ .../licenses/retries-NOTICE.txt | 0 .../licenses/retries-spi-2.30.31.jar.sha1 | 1 + .../licenses/retries-spi-LICENSE.txt | 202 +++++++ .../licenses/retries-spi-NOTICE.txt | 0 .../licenses/sdk-core-2.30.31.jar.sha1 | 1 + .../licenses/sdk-core-LICENSE.txt | 202 +++++++ .../licenses/sdk-core-NOTICE.txt | 0 .../licenses/slf4j-api-1.7.36.jar.sha1 | 1 + .../licenses/slf4j-api-LICENSE.txt | 24 + .../licenses/slf4j-api-NOTICE.txt | 0 .../licenses/sts-2.30.31.jar.sha1 | 1 + .../licenses/sts-LICENSE.txt | 202 +++++++ .../ingestion-kinesis/licenses/sts-NOTICE.txt | 0 .../third-party-jackson-core-2.30.31.jar.sha1 | 1 + .../third-party-jackson-core-LICENSE.txt | 202 +++++++ .../third-party-jackson-core-NOTICE.txt | 14 + ...y-jackson-dataformat-cbor-2.30.31.jar.sha1 | 1 + ...-party-jackson-dataformat-cbor-LICENSE.txt | 202 +++++++ ...d-party-jackson-dataformat-cbor-NOTICE.txt | 14 + .../licenses/utils-2.30.31.jar.sha1 | 1 + .../licenses/utils-LICENSE.txt | 202 +++++++ .../licenses/utils-NOTICE.txt | 0 .../plugin/kinesis/IngestFromKinesisIT.java | 128 ++++ .../kinesis/KinesisIngestionBaseIT.java | 129 ++++ 
.../TestContainerThreadLeakFilter.java | 26 + .../kinesis/KinesisConsumerFactory.java | 45 ++ .../plugin/kinesis/KinesisMessage.java | 31 + .../plugin/kinesis/KinesisPlugin.java | 41 ++ .../plugin/kinesis/KinesisShardConsumer.java | 263 +++++++++ .../plugin/kinesis/KinesisSourceConfig.java | 83 +++ .../plugin/kinesis/SequenceNumber.java | 99 ++++ .../plugin/kinesis/package-info.java | 10 + .../plugin-metadata/plugin-security.policy | 13 + .../kinesis/KinesisConsumerFactoryTests.java | 55 ++ .../plugin/kinesis/KinesisMessageTests.java | 27 + .../kinesis/KinesisShardConsumerTests.java | 200 +++++++ .../kinesis/KinesisSourceConfigTests.java | 52 ++ .../plugin/kinesis/SequenceNumberTests.java | 90 +++ .../opensearch.release-notes-3.0.0-alpha1.md | 1 - .../index/IngestionShardConsumer.java | 20 +- .../index/engine/IngestionEngine.java | 2 + .../pollingingest/DefaultStreamPoller.java | 38 +- .../index/engine/FakeIngestionSource.java | 21 +- .../index/engine/IngestionEngineTests.java | 3 +- .../DefaultStreamPollerTests.java | 25 +- 149 files changed, 9408 insertions(+), 61 deletions(-) create mode 100644 plugins/ingestion-kinesis/build.gradle create mode 100644 plugins/ingestion-kinesis/licenses/annotations-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/annotations-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/annotations-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/apache-client-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/apache-client-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/apache-client-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/auth-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/auth-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/auth-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-cbor-protocol-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/aws-cbor-protocol-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-cbor-protocol-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-core-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/aws-core-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-core-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-crt-0.35.0.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/aws-crt-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-crt-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-json-protocol-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/aws-json-protocol-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-json-protocol-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-query-protocol-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/aws-query-protocol-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-query-protocol-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-xml-protocol-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/aws-xml-protocol-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/aws-xml-protocol-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/checksums-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/checksums-LICENSE.txt create mode 100644 
plugins/ingestion-kinesis/licenses/checksums-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/checksums-spi-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/checksums-spi-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/checksums-spi-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/commons-codec-1.16.1.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/commons-codec-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/commons-codec-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/commons-logging-1.2.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/commons-logging-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/commons-logging-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/endpoints-spi-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/endpoints-spi-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/endpoints-spi-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/http-auth-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/http-auth-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/http-auth-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/http-auth-aws-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/http-auth-aws-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/http-auth-aws-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/http-auth-spi-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/http-auth-spi-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/http-auth-spi-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/http-client-spi-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/http-client-spi-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/http-client-spi-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/httpclient-4.5.14.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/httpclient-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/httpclient-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/httpcore-4.4.16.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/httpcore-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/httpcore-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/identity-spi-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/identity-spi-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/identity-spi-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/jackson-LICENSE create mode 100644 plugins/ingestion-kinesis/licenses/jackson-NOTICE create mode 100644 plugins/ingestion-kinesis/licenses/jackson-annotations-2.18.2.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/jackson-databind-2.18.2.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/json-utils-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/json-utils-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/json-utils-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/kinesis-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/kinesis-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/kinesis-NOTICE.txt create mode 100644 
plugins/ingestion-kinesis/licenses/metrics-spi-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/metrics-spi-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/metrics-spi-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/netty-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/netty-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/netty-buffer-4.1.118.Final.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/netty-codec-4.1.118.Final.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/netty-codec-http-4.1.118.Final.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/netty-common-4.1.118.Final.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/netty-handler-4.1.118.Final.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/netty-nio-client-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/netty-resolver-4.1.118.Final.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/netty-transport-4.1.118.Final.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/profiles-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/profiles-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/profiles-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/protocol-core-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/protocol-core-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/protocol-core-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/regions-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/regions-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/regions-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/retries-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/retries-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/retries-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/retries-spi-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/retries-spi-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/retries-spi-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/sdk-core-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/sdk-core-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/sdk-core-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/slf4j-api-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/slf4j-api-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/sts-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/sts-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/sts-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/third-party-jackson-core-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/third-party-jackson-core-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/third-party-jackson-core-NOTICE.txt create mode 100644 
plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/licenses/utils-2.30.31.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/utils-LICENSE.txt create mode 100644 plugins/ingestion-kinesis/licenses/utils-NOTICE.txt create mode 100644 plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java create mode 100644 plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/KinesisIngestionBaseIT.java create mode 100644 plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/TestContainerThreadLeakFilter.java create mode 100644 plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisConsumerFactory.java create mode 100644 plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisMessage.java create mode 100644 plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisPlugin.java create mode 100644 plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisShardConsumer.java create mode 100644 plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisSourceConfig.java create mode 100644 plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/SequenceNumber.java create mode 100644 plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/package-info.java create mode 100644 plugins/ingestion-kinesis/src/main/plugin-metadata/plugin-security.policy create mode 100644 plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisConsumerFactoryTests.java create mode 100644 plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisMessageTests.java create mode 100644 plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisShardConsumerTests.java create mode 100644 plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisSourceConfigTests.java create mode 100644 plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/SequenceNumberTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index bbd8d2c427b29..e62a6235a3609 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a new search node role to hold search only shards ([#17620](https://github.com/opensearch-project/OpenSearch/pull/17620)) - Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) +- Added Kinesis support as a plugin for the pull-based ingestion ([#17615](https://github.com/opensearch-project/OpenSearch/pull/17615) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java index 15f20af6d6275..97b4ac1b0103e 100644 --- 
a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java +++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java @@ -119,22 +119,34 @@ protected static Consumer createConsumer(String clientId, KafkaS } @Override - public List> readNext(KafkaOffset offset, long maxMessages, int timeoutMillis) - throws TimeoutException { + public List> readNext( + KafkaOffset offset, + boolean includeStart, + long maxMessages, + int timeoutMillis + ) throws TimeoutException { List> records = AccessController.doPrivileged( - (PrivilegedAction>>) () -> fetch(offset.getOffset(), maxMessages, timeoutMillis) + (PrivilegedAction>>) () -> fetch( + offset.getOffset(), + includeStart, + maxMessages, + timeoutMillis + ) ); return records; } @Override - public KafkaOffset nextPointer() { - return new KafkaOffset(lastFetchedOffset + 1); - } - - @Override - public KafkaOffset nextPointer(KafkaOffset pointer) { - return new KafkaOffset(pointer.getOffset() + 1); + public List> readNext(long maxMessages, int timeoutMillis) throws TimeoutException { + List> records = AccessController.doPrivileged( + (PrivilegedAction>>) () -> fetch( + lastFetchedOffset, + false, + maxMessages, + timeoutMillis + ) + ); + return records; } @Override @@ -191,18 +203,28 @@ public IngestionShardPointer pointerFromOffset(String offset) { return new KafkaOffset(offsetValue); } - private synchronized List> fetch(long startOffset, long maxMessages, int timeoutMillis) { - if (lastFetchedOffset < 0 || lastFetchedOffset != startOffset - 1) { - logger.info("Seeking to offset {}", startOffset); - consumer.seek(topicPartition, startOffset); + private synchronized List> fetch( + long startOffset, + boolean includeStart, + long maxMessages, + int timeoutMillis + ) { + long kafkaStartOffset = startOffset; + if (!includeStart) { + kafkaStartOffset += 1; + } + + if (lastFetchedOffset < 0 || lastFetchedOffset != kafkaStartOffset - 1) { + logger.info("Seeking to offset {}", kafkaStartOffset); + consumer.seek(topicPartition, kafkaStartOffset); // update the last fetched offset so that we don't need to seek again if no more messages to fetch - lastFetchedOffset = startOffset - 1; + lastFetchedOffset = kafkaStartOffset - 1; } ConsumerRecords consumerRecords = consumer.poll(Duration.ofMillis(timeoutMillis)); List> messageAndOffsets = consumerRecords.records(topicPartition); - long endOffset = startOffset + maxMessages; + long endOffset = kafkaStartOffset + maxMessages; List> results = new ArrayList<>(); for (ConsumerRecord messageAndOffset : messageAndOffsets) { diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java index d1d9ad4fbf8ae..bfb711f288864 100644 --- a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java +++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java @@ -61,11 +61,10 @@ public void testReadNext() throws Exception { when(mockConsumer.poll(any(Duration.class))).thenReturn(records); - List> result = consumer.readNext(new KafkaOffset(0), 10, 1000); + List> result = consumer.readNext(new KafkaOffset(0), true, 10, 1000); assertEquals(1, result.size()); assertEquals("message", new String(result.get(0).getMessage().getPayload(), StandardCharsets.UTF_8)); - assertEquals(1, consumer.nextPointer().getOffset()); assertEquals(0, 
consumer.getShardId()); assertEquals("client1", consumer.getClientId()); } diff --git a/plugins/ingestion-kinesis/build.gradle b/plugins/ingestion-kinesis/build.gradle new file mode 100644 index 0000000000000..a8100018c7f4a --- /dev/null +++ b/plugins/ingestion-kinesis/build.gradle @@ -0,0 +1,251 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +apply plugin: 'opensearch.internal-cluster-test' + +opensearchplugin { + description = 'Pull-based ingestion plugin to consume from Kinesis' + classname = 'org.opensearch.plugin.kinesis.KinesisPlugin' +} + +versions << [ + 'docker': '3.3.6', + 'testcontainers': '1.19.7', + 'ducttape': '1.0.8', + 'snappy': '1.1.10.7', +] + +dependencies { + // aws sdk + api "software.amazon.awssdk:sdk-core:${versions.aws}" + api "software.amazon.awssdk:annotations:${versions.aws}" + api "software.amazon.awssdk:aws-core:${versions.aws}" + api "software.amazon.awssdk:auth:${versions.aws}" + api "software.amazon.awssdk:identity-spi:${versions.aws}" + api "software.amazon.awssdk:checksums:${versions.aws}" + api "software.amazon.awssdk:checksums-spi:${versions.aws}" + api "software.amazon.awssdk.crt:aws-crt:${versions.awscrt}" + api "software.amazon.awssdk:http-auth:${versions.aws}" + api "software.amazon.awssdk:http-auth-aws:${versions.aws}" + api "software.amazon.awssdk:http-auth-spi:${versions.aws}" + api "software.amazon.awssdk:retries:${versions.aws}" + api "software.amazon.awssdk:retries-spi:${versions.aws}" + api "software.amazon.awssdk:endpoints-spi:${versions.aws}" + api "software.amazon.awssdk:http-client-spi:${versions.aws}" + api "software.amazon.awssdk:apache-client:${versions.aws}" + api "software.amazon.awssdk:metrics-spi:${versions.aws}" + api "software.amazon.awssdk:profiles:${versions.aws}" + api "software.amazon.awssdk:regions:${versions.aws}" + api "software.amazon.awssdk:utils:${versions.aws}" + api "software.amazon.awssdk:aws-json-protocol:${versions.aws}" + api "software.amazon.awssdk:protocol-core:${versions.aws}" + api "software.amazon.awssdk:json-utils:${versions.aws}" + api "software.amazon.awssdk:third-party-jackson-core:${versions.aws}" + api "software.amazon.awssdk:aws-xml-protocol:${versions.aws}" + api "software.amazon.awssdk:aws-json-protocol:${versions.aws}" + api "software.amazon.awssdk:aws-query-protocol:${versions.aws}" + api "software.amazon.awssdk:sts:${versions.aws}" + api "software.amazon.awssdk:netty-nio-client:${versions.aws}" + api "software.amazon.awssdk:kinesis:${versions.aws}" + api "software.amazon.awssdk:aws-cbor-protocol:${versions.aws}" + api "software.amazon.awssdk:third-party-jackson-dataformat-cbor:${versions.aws}" + + api "org.apache.httpcomponents:httpclient:${versions.httpclient}" + api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "commons-logging:commons-logging:${versions.commonslogging}" + api "commons-codec:commons-codec:${versions.commonscodec}" + api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" + api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + api "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" + api "joda-time:joda-time:${versions.joda}" + api "org.slf4j:slf4j-api:${versions.slf4j}" + + // 
network stack + api "io.netty:netty-buffer:${versions.netty}" + api "io.netty:netty-codec:${versions.netty}" + api "io.netty:netty-codec-http:${versions.netty}" + api "io.netty:netty-codec-http2:${versions.netty}" + api "io.netty:netty-common:${versions.netty}" + api "io.netty:netty-handler:${versions.netty}" + api "io.netty:netty-resolver:${versions.netty}" + api "io.netty:netty-transport:${versions.netty}" + api "io.netty:netty-transport-native-unix-common:${versions.netty}" + api "io.netty:netty-transport-classes-epoll:${versions.netty}" + + + // test + testImplementation "com.github.docker-java:docker-java-api:${versions.docker}" + testImplementation "com.github.docker-java:docker-java-transport:${versions.docker}" + testImplementation "com.github.docker-java:docker-java-transport-zerodep:${versions.docker}" + testImplementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + testImplementation "org.testcontainers:testcontainers:${versions.testcontainers}" + testImplementation "org.testcontainers:localstack:${versions.testcontainers}" + testImplementation "org.rnorth.duct-tape:duct-tape:${versions.ducttape}" + testImplementation "org.apache.commons:commons-compress:${versions.commonscompress}" + testImplementation "commons-io:commons-io:${versions.commonsio}" + testImplementation 'org.awaitility:awaitility:4.2.0' +} + +internalClusterTest{ + environment 'TESTCONTAINERS_RYUK_DISABLED', 'true' + // TODO: Adding permission in plugin-security.policy doesn't seem to work. + systemProperty 'tests.security.manager', 'false' +} + +tasks.named("dependencyLicenses").configure { + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /netty-.*/, to: 'netty' + mapping from: /log4j-.*/, to: 'log4j' +} + +thirdPartyAudit { + ignoreMissingClasses( + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + 'org.apache.log4j.Priority', + + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', + + 'org.graalvm.nativeimage.hosted.Feature', + 'org.graalvm.nativeimage.hosted.Feature$AfterImageWriteAccess', + + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + + 'io.netty.internal.tcnative.Buffer', + 'io.netty.internal.tcnative.CertificateCompressionAlgo', + 'io.netty.internal.tcnative.Library', + 'io.netty.internal.tcnative.SSLContext', + 'io.netty.internal.tcnative.SSLPrivateKeyMethod', + + 'io.netty.internal.tcnative.AsyncSSLPrivateKeyMethod', + 'io.netty.internal.tcnative.AsyncTask', + 'io.netty.internal.tcnative.CertificateCallback', + 'io.netty.internal.tcnative.CertificateVerifier', + 'io.netty.internal.tcnative.ResultCallback', + 
'io.netty.internal.tcnative.SessionTicketKey', + 'io.netty.internal.tcnative.SniHostNameMatcher', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLSession', + 'io.netty.internal.tcnative.SSLSessionCache', + + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration', + + 'software.amazon.eventstream.HeaderValue', + 'software.amazon.eventstream.Message', + 'software.amazon.eventstream.MessageDecoder' + ) + + ignoreViolations ( + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5', + + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.PlatformDependent0$4', + 'io.netty.util.internal.PlatformDependent0$6', + + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 
'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', + ) +} diff --git a/plugins/ingestion-kinesis/licenses/annotations-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/annotations-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d45f8758c9405 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/annotations-2.30.31.jar.sha1 @@ -0,0 +1 @@ +c5acc1da9567290302d80ffa1633785afa4ce630 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/annotations-LICENSE.txt b/plugins/ingestion-kinesis/licenses/annotations-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/annotations-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/annotations-NOTICE.txt b/plugins/ingestion-kinesis/licenses/annotations-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/apache-client-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/apache-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..97331cbda2c1b --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/apache-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d1c602dba702782a0afec0a08c919322693a3bf8 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/apache-client-LICENSE.txt b/plugins/ingestion-kinesis/licenses/apache-client-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/apache-client-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/apache-client-NOTICE.txt b/plugins/ingestion-kinesis/licenses/apache-client-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/apache-client-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/ingestion-kinesis/licenses/auth-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..c1e199ca02fc8 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +8887962b04ce5f1a9f46d44acd806949b17082da \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/auth-LICENSE.txt b/plugins/ingestion-kinesis/licenses/auth-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/auth-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/auth-NOTICE.txt b/plugins/ingestion-kinesis/licenses/auth-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/aws-cbor-protocol-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/aws-cbor-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..a50ab2a27f127 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-cbor-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +e045d7fc59043054fe8a71a527cd88d4b6c15929 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/aws-cbor-protocol-LICENSE.txt b/plugins/ingestion-kinesis/licenses/aws-cbor-protocol-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-cbor-protocol-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/aws-cbor-protocol-NOTICE.txt b/plugins/ingestion-kinesis/licenses/aws-cbor-protocol-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/aws-core-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/aws-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..16050fd1d8c6d --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5016fadbd7146171b4afe09eb0675b710b0f2d12 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/aws-core-LICENSE.txt b/plugins/ingestion-kinesis/licenses/aws-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/aws-core-NOTICE.txt b/plugins/ingestion-kinesis/licenses/aws-core-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/aws-crt-0.35.0.jar.sha1 b/plugins/ingestion-kinesis/licenses/aws-crt-0.35.0.jar.sha1 new file mode 100644 index 0000000000000..1097f5bb4d814 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-crt-0.35.0.jar.sha1 @@ -0,0 +1 @@ +33041403e1a9dd94f40330206eda5ffc22ee185c \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/aws-crt-LICENSE.txt b/plugins/ingestion-kinesis/licenses/aws-crt-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-crt-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/aws-crt-NOTICE.txt b/plugins/ingestion-kinesis/licenses/aws-crt-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/aws-json-protocol-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/aws-json-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..bfc742d8687d1 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-json-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4600659276f84e114c1fabeb1478911c581a7739 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/aws-json-protocol-LICENSE.txt b/plugins/ingestion-kinesis/licenses/aws-json-protocol-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-json-protocol-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/aws-json-protocol-NOTICE.txt b/plugins/ingestion-kinesis/licenses/aws-json-protocol-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/aws-query-protocol-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/aws-query-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9508295147c96 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-query-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +61596c0cb577a4a6c438a5a7ee0391d2d825b3fe \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/aws-query-protocol-LICENSE.txt b/plugins/ingestion-kinesis/licenses/aws-query-protocol-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-query-protocol-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/aws-query-protocol-NOTICE.txt b/plugins/ingestion-kinesis/licenses/aws-query-protocol-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/aws-xml-protocol-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/aws-xml-protocol-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..79a09fa635a20 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-xml-protocol-2.30.31.jar.sha1 @@ -0,0 +1 @@ +ad1620b4e221840e2215348a296cc762c23a59c3 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/aws-xml-protocol-LICENSE.txt b/plugins/ingestion-kinesis/licenses/aws-xml-protocol-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/aws-xml-protocol-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/aws-xml-protocol-NOTICE.txt b/plugins/ingestion-kinesis/licenses/aws-xml-protocol-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/checksums-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/checksums-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4447b86f6e872 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/checksums-2.30.31.jar.sha1 @@ -0,0 +1 @@ +6d00287bc0ceb013dd5c74f1c4eb296ae61b34d4 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/checksums-LICENSE.txt b/plugins/ingestion-kinesis/licenses/checksums-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/checksums-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/checksums-NOTICE.txt b/plugins/ingestion-kinesis/licenses/checksums-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/checksums-spi-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/checksums-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..078cab150c5ad --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/checksums-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b5a5b0a39403acf41c21fd16cd11c7c8d887601b \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/checksums-spi-LICENSE.txt b/plugins/ingestion-kinesis/licenses/checksums-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/checksums-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/checksums-spi-NOTICE.txt b/plugins/ingestion-kinesis/licenses/checksums-spi-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/ingestion-kinesis/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/commons-codec-LICENSE.txt b/plugins/ingestion-kinesis/licenses/commons-codec-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/commons-codec-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/commons-codec-NOTICE.txt b/plugins/ingestion-kinesis/licenses/commons-codec-NOTICE.txt new file mode 100644 index 0000000000000..56916449bbe10 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/commons-codec-NOTICE.txt @@ -0,0 +1,17 @@ +Apache Commons Codec +Copyright 2002-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been translated +from the original php source code available at http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. +Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. diff --git a/plugins/ingestion-kinesis/licenses/commons-logging-1.2.jar.sha1 b/plugins/ingestion-kinesis/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/commons-logging-LICENSE.txt b/plugins/ingestion-kinesis/licenses/commons-logging-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/commons-logging-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/commons-logging-NOTICE.txt b/plugins/ingestion-kinesis/licenses/commons-logging-NOTICE.txt new file mode 100644 index 0000000000000..d3d6e140ce4f3 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/commons-logging-NOTICE.txt @@ -0,0 +1,5 @@ +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/ingestion-kinesis/licenses/endpoints-spi-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/endpoints-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4dbc884c3da6f --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/endpoints-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +0734f4b9c68f19201896dd47639035b4e0a7964d \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/endpoints-spi-LICENSE.txt b/plugins/ingestion-kinesis/licenses/endpoints-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/endpoints-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/endpoints-spi-NOTICE.txt b/plugins/ingestion-kinesis/licenses/endpoints-spi-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/http-auth-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/http-auth-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..79893fb4fbf58 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/http-auth-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b7baeb158b0af0e400d89a32595c9127db2bbb6e \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/http-auth-LICENSE.txt b/plugins/ingestion-kinesis/licenses/http-auth-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/http-auth-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/http-auth-NOTICE.txt b/plugins/ingestion-kinesis/licenses/http-auth-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/http-auth-aws-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/http-auth-aws-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d190c6ca52e98 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/http-auth-aws-2.30.31.jar.sha1 @@ -0,0 +1 @@ +f2a7d383158746c82b0f41b021e0da23a2597b35 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/http-auth-aws-LICENSE.txt b/plugins/ingestion-kinesis/licenses/http-auth-aws-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/http-auth-aws-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/http-auth-aws-NOTICE.txt b/plugins/ingestion-kinesis/licenses/http-auth-aws-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/http-auth-spi-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/http-auth-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..491ffe4dd0584 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/http-auth-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +513519f79635441d5205fc31d56c2e0d5826d27f \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/http-auth-spi-LICENSE.txt b/plugins/ingestion-kinesis/licenses/http-auth-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/http-auth-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/http-auth-spi-NOTICE.txt b/plugins/ingestion-kinesis/licenses/http-auth-spi-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/http-client-spi-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/http-client-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..d86fa139f535c --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/http-client-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +5fa894c333793b7481aa03aa87512b20e11b057d \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/http-client-spi-LICENSE.txt b/plugins/ingestion-kinesis/licenses/http-client-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/http-client-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/http-client-spi-NOTICE.txt b/plugins/ingestion-kinesis/licenses/http-client-spi-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/httpclient-4.5.14.jar.sha1 b/plugins/ingestion-kinesis/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/httpclient-LICENSE.txt b/plugins/ingestion-kinesis/licenses/httpclient-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/httpclient-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/plugins/ingestion-kinesis/licenses/httpclient-NOTICE.txt b/plugins/ingestion-kinesis/licenses/httpclient-NOTICE.txt new file mode 100644 index 0000000000000..4f6058178b201 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/httpclient-NOTICE.txt @@ -0,0 +1,5 @@ +Apache HttpComponents Client +Copyright 1999-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/ingestion-kinesis/licenses/httpcore-4.4.16.jar.sha1 b/plugins/ingestion-kinesis/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 0000000000000..172110694b5bd --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/httpcore-LICENSE.txt b/plugins/ingestion-kinesis/licenses/httpcore-LICENSE.txt new file mode 100644 index 0000000000000..72819a9f06f2a --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/httpcore-LICENSE.txt @@ -0,0 +1,241 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project contains annotations in the package org.apache.http.annotation +which are derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. +See http://www.jcip.net and the Creative Commons Attribution License +(http://creativecommons.org/licenses/by/2.5) +Full text: http://creativecommons.org/licenses/by/2.5/legalcode + +License + +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. + +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. + +1. Definitions + + "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. + "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. 
+ "Licensor" means the individual or entity that offers the Work under the terms of this License. + "Original Author" means the individual or entity who created the Work. + "Work" means the copyrightable work of authorship offered under the terms of this License. + "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. + +2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. + +3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: + + to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; + to create and reproduce Derivative Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. + + For the avoidance of doubt, where the work is a musical composition: + Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. + Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). + Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). + +The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved. + +4. 
Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: + + You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested. + If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. + +5. Representations, Warranties and Disclaimer + +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. 
SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. + +6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. Termination + + This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. + Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. + +8. Miscellaneous + + Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. + Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. + If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. + This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. diff --git a/plugins/ingestion-kinesis/licenses/httpcore-NOTICE.txt b/plugins/ingestion-kinesis/licenses/httpcore-NOTICE.txt new file mode 100644 index 0000000000000..c0be50a505ec1 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/httpcore-NOTICE.txt @@ -0,0 +1,8 @@ +Apache HttpComponents Core +Copyright 2005-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This project contains annotations derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. 
See http://www.jcip.net diff --git a/plugins/ingestion-kinesis/licenses/identity-spi-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/identity-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..9eeab9ad13dba --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/identity-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +46da74ac074b176c25fba07c6541737422622c1d \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/identity-spi-LICENSE.txt b/plugins/ingestion-kinesis/licenses/identity-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/identity-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/identity-spi-NOTICE.txt b/plugins/ingestion-kinesis/licenses/identity-spi-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/jackson-LICENSE b/plugins/ingestion-kinesis/licenses/jackson-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/plugins/ingestion-kinesis/licenses/jackson-NOTICE b/plugins/ingestion-kinesis/licenses/jackson-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. 
diff --git a/plugins/ingestion-kinesis/licenses/jackson-annotations-2.18.2.jar.sha1 b/plugins/ingestion-kinesis/licenses/jackson-annotations-2.18.2.jar.sha1 new file mode 100644 index 0000000000000..a06e1d5f28425 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/jackson-annotations-2.18.2.jar.sha1 @@ -0,0 +1 @@ +985d77751ebc7fce5db115a986bc9aa82f973f4a \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/jackson-databind-2.18.2.jar.sha1 b/plugins/ingestion-kinesis/licenses/jackson-databind-2.18.2.jar.sha1 new file mode 100644 index 0000000000000..eedbfff66c705 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/jackson-databind-2.18.2.jar.sha1 @@ -0,0 +1 @@ +deef8697b92141fb6caf7aa86966cff4eec9b04f \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/json-utils-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/json-utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..5019f6d48fa0a --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/json-utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7f0ef4b49299df2fd39f92113d94524729c61032 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/json-utils-LICENSE.txt b/plugins/ingestion-kinesis/licenses/json-utils-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/json-utils-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/json-utils-NOTICE.txt b/plugins/ingestion-kinesis/licenses/json-utils-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/kinesis-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/kinesis-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..f0a7788a041bc --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/kinesis-2.30.31.jar.sha1 @@ -0,0 +1 @@ +9e84a7317cf1c5b10c5c1c1691df38fc209231f8 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/kinesis-LICENSE.txt b/plugins/ingestion-kinesis/licenses/kinesis-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/kinesis-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/kinesis-NOTICE.txt b/plugins/ingestion-kinesis/licenses/kinesis-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/metrics-spi-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/metrics-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..69ab3ec6f79ff --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/metrics-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +57a979cbc99d0bf4113d96aaf4f453303a015966 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/metrics-spi-LICENSE.txt b/plugins/ingestion-kinesis/licenses/metrics-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/metrics-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/metrics-spi-NOTICE.txt b/plugins/ingestion-kinesis/licenses/metrics-spi-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/netty-LICENSE.txt b/plugins/ingestion-kinesis/licenses/netty-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/netty-NOTICE.txt b/plugins/ingestion-kinesis/licenses/netty-NOTICE.txt new file mode 100644 index 0000000000000..5bbf91a14de23 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-NOTICE.txt @@ -0,0 +1,116 @@ + + The Netty Project + ================= + +Please visit the Netty web site for more information: + + * http://netty.io/ + +Copyright 2011 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. 
+ +Also, please refer to each LICENSE..txt file, which is located in +the 'license' directory of the distribution file, for the license terms of the +components that this product depends on. + +------------------------------------------------------------------------------- +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified version of 'JZlib', a re-implementation of +zlib in pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD Style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product contains a modified version of 'Webbit', a Java event based +WebSocket and HTTP server: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + +This product optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * http://code.google.com/p/protobuf/ + +This product optionally depends on 'Bouncy Castle Crypto APIs' to generate +a temporary self-signed X.509 certificate when the JVM does not provide the +equivalent functionality. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * http://www.bouncycastle.org/ + +This product optionally depends on 'SLF4J', a simple logging facade for Java, +which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * http://www.slf4j.org/ + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * http://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * http://logging.apache.org/log4j/ + +This product optionally depends on 'JBoss Logging', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-logging.txt (GNU LGPL 2.1) + * HOMEPAGE: + * http://anonsvn.jboss.org/repos/common/common-logging-spi/ + +This product optionally depends on 'Apache Felix', an open source OSGi +framework implementation, which can be obtained at: + + * LICENSE: + * license/LICENSE.felix.txt (Apache License 2.0) + * HOMEPAGE: + * http://felix.apache.org/ diff --git a/plugins/ingestion-kinesis/licenses/netty-buffer-4.1.118.Final.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-buffer-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..556d707b9c29e --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-buffer-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +7022990af1e0d449f9d5322035899745e19735c5 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/netty-codec-4.1.118.Final.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-codec-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..7964f25f0372a --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-codec-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +307f665c08ce57333121de4f460479fc0c3c94d4 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/netty-codec-http-4.1.118.Final.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-codec-http-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..7cb43dd276c8a --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-codec-http-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +eda08a71294afe78c779b85fd696bc13491507a8 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..fab58dee2dfbf --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-codec-http2-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +e3c35c0685ec9e84c4f84b79feea7c9d185a08d3 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/netty-common-4.1.118.Final.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-common-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..632058bd06778 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-common-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +4bb0f9899146484fa89f7b9bc27389d5b8e2ecde \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/netty-handler-4.1.118.Final.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-handler-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..d6eea2494813e --- /dev/null +++ 
b/plugins/ingestion-kinesis/licenses/netty-handler-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +30ebb05b6b0fb071dbfcf713017c4a767a97bb9b \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/netty-nio-client-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-nio-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..f49d74cc59e37 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-nio-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +a7226fc3811c7a071e44a33273e081f212e581e3 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/netty-resolver-4.1.118.Final.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-resolver-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..19fbdbbb19b04 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-resolver-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +28c378c19c1779eca1104b400452627f3ebc4aea \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/netty-transport-4.1.118.Final.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-transport-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..f3b714539e61b --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-transport-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +5a27232e5d08218722d94ca14f0b1b4576e7711c \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..d53656cd3b7dc --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-transport-classes-epoll-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +376ce95507066f0e755d97c1c8bcd6c33f657617 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 b/plugins/ingestion-kinesis/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 new file mode 100644 index 0000000000000..f1562364e2848 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/netty-transport-native-unix-common-4.1.118.Final.jar.sha1 @@ -0,0 +1 @@ +9da25a94e6a0edac90da0bc7894e5a54efcb866b \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/profiles-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/profiles-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..6d4d2a1ac8d65 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/profiles-2.30.31.jar.sha1 @@ -0,0 +1 @@ +d6d2d5788695972140dfe8b012ea7ccd97b82eef \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/profiles-LICENSE.txt b/plugins/ingestion-kinesis/licenses/profiles-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/profiles-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/profiles-NOTICE.txt b/plugins/ingestion-kinesis/licenses/profiles-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/protocol-core-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/protocol-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..caae2a4302976 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/protocol-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +ee17b25525aee497b6d520c8e499f39de7204fbc \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/protocol-core-LICENSE.txt b/plugins/ingestion-kinesis/licenses/protocol-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/protocol-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/protocol-core-NOTICE.txt b/plugins/ingestion-kinesis/licenses/protocol-core-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/regions-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/regions-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..8e9876686a144 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/regions-2.30.31.jar.sha1 @@ -0,0 +1 @@ +7ce1df66496dcf9b124edb78ab9675e1e7d5c427 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/regions-LICENSE.txt b/plugins/ingestion-kinesis/licenses/regions-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/regions-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/regions-NOTICE.txt b/plugins/ingestion-kinesis/licenses/regions-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/retries-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/retries-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..98b46e3439ac7 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/retries-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b490f67c9d3f000ae40928d9aa3c9debceac0966 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/retries-LICENSE.txt b/plugins/ingestion-kinesis/licenses/retries-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/retries-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/retries-NOTICE.txt b/plugins/ingestion-kinesis/licenses/retries-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/retries-spi-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/retries-spi-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..854e3d7e4aebf --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/retries-spi-2.30.31.jar.sha1 @@ -0,0 +1 @@ +4d9166189594243f88045fbf0c871a81e3914c0b \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/retries-spi-LICENSE.txt b/plugins/ingestion-kinesis/licenses/retries-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/retries-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/retries-spi-NOTICE.txt b/plugins/ingestion-kinesis/licenses/retries-spi-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/sdk-core-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/sdk-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..ee3d7e3bff68d --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/sdk-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +b95c07d4796105c2e61c4c6ab60e3189886b2787 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/sdk-core-LICENSE.txt b/plugins/ingestion-kinesis/licenses/sdk-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/sdk-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/sdk-core-NOTICE.txt b/plugins/ingestion-kinesis/licenses/sdk-core-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/ingestion-kinesis/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/slf4j-api-LICENSE.txt b/plugins/ingestion-kinesis/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..1a3d053237bec --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,24 @@ +Copyright (c) 2004-2022 QOS.ch Sarl (Switzerland) +All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + diff --git a/plugins/ingestion-kinesis/licenses/slf4j-api-NOTICE.txt b/plugins/ingestion-kinesis/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/sts-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/sts-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..3752d0003bc8d --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/sts-2.30.31.jar.sha1 @@ -0,0 +1 @@ +fb85a774f8e7265ed4bc4255e6df8a80ee8cf4b9 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/sts-LICENSE.txt b/plugins/ingestion-kinesis/licenses/sts-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/sts-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/sts-NOTICE.txt b/plugins/ingestion-kinesis/licenses/sts-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/licenses/third-party-jackson-core-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/third-party-jackson-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..a07a8eda62447 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/third-party-jackson-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +100d8022939bd59cd7d2461bd4fb0fd9fa028499 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/third-party-jackson-core-LICENSE.txt b/plugins/ingestion-kinesis/licenses/third-party-jackson-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/third-party-jackson-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/third-party-jackson-core-NOTICE.txt b/plugins/ingestion-kinesis/licenses/third-party-jackson-core-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/third-party-jackson-core-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..ebefbe4530486 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-2.30.31.jar.sha1 @@ -0,0 +1 @@ +868582af36ae946a1b005a228094cea55a74dfcd \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-LICENSE.txt b/plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-NOTICE.txt b/plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/third-party-jackson-dataformat-cbor-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/ingestion-kinesis/licenses/utils-2.30.31.jar.sha1 b/plugins/ingestion-kinesis/licenses/utils-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..184ff1cc5f9ce --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/utils-2.30.31.jar.sha1 @@ -0,0 +1 @@ +3340adacb87ff28f90a039d57c81311b296db89e \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/utils-LICENSE.txt b/plugins/ingestion-kinesis/licenses/utils-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/utils-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kinesis/licenses/utils-NOTICE.txt b/plugins/ingestion-kinesis/licenses/utils-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java b/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java new file mode 100644 index 0000000000000..44b9be94eec17 --- /dev/null +++ b/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java @@ -0,0 +1,128 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.kinesis; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.indices.pollingingest.PollingIngestStats; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import org.testcontainers.containers.localstack.LocalStackContainer; + +import static org.hamcrest.Matchers.is; +import static org.awaitility.Awaitility.await; + +/** + * Integration test for Kinesis ingestion + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class IngestFromKinesisIT extends KinesisIngestionBaseIT { + /** + * test ingestion-kinesis-plugin is installed + */ + public void testPluginsAreInstalled() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); + NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + List pluginInfos = nodesInfoResponse.getNodes() + .stream() + .flatMap( + (Function>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() + ) + .collect(Collectors.toList()); + Assert.assertTrue( + pluginInfos.stream().anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.plugin.kinesis.KinesisPlugin")) + ); + } + + public void testKinesisIngestion() { + produceData("1", "name1", "24"); + produceData("2", "name2", "20"); + + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kinesis") + .put("ingestion_source.pointer.init.reset", "earliest") + .put("ingestion_source.param.stream", "test") + .put("ingestion_source.param.region", localstack.getRegion()) + .put("ingestion_source.param.access_key", localstack.getAccessKey()) + .put("ingestion_source.param.secret_key", localstack.getSecretKey()) + .put( + "ingestion_source.param.endpoint_override", + localstack.getEndpointOverride(LocalStackContainer.Service.KINESIS).toString() + ) + .put("index.replication.type", "SEGMENT") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); + + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(21); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test"); + SearchResponse response = client().prepareSearch("test").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + PollingIngestStats stats = client().admin().indices().prepareStats("test").get().getIndex("test").getShards()[0] + .getPollingIngestStats(); + assertNotNull(stats); + assertThat(stats.getMessageProcessorStats().getTotalProcessedCount(), is(2L)); + assertThat(stats.getConsumerStats().getTotalPolledCount(), is(2L)); + }); + } + + public void 
testKinesisIngestion_RewindByOffset() throws InterruptedException { + produceData("1", "name1", "24"); + String sequenceNumber = produceData("2", "name2", "20"); + Thread.sleep(1000); + + // create an index with ingestion source from kinesis + createIndex( + "test_rewind_by_offset", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("ingestion_source.type", "kinesis") + .put("ingestion_source.pointer.init.reset", "rewind_by_offset") + .put("ingestion_source.pointer.init.reset.value", sequenceNumber) + .put("ingestion_source.param.stream", "test") + .put("ingestion_source.param.region", localstack.getRegion()) + .put("ingestion_source.param.access_key", localstack.getAccessKey()) + .put("ingestion_source.param.secret_key", localstack.getSecretKey()) + .put( + "ingestion_source.param.endpoint_override", + localstack.getEndpointOverride(LocalStackContainer.Service.KINESIS).toString() + ) + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); + + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); + await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + refresh("test_rewind_by_offset"); + SearchResponse response = client().prepareSearch("test_rewind_by_offset").setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + }); + } +} diff --git a/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/KinesisIngestionBaseIT.java b/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/KinesisIngestionBaseIT.java new file mode 100644 index 0000000000000..1d2b9cc00b867 --- /dev/null +++ b/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/KinesisIngestionBaseIT.java @@ -0,0 +1,129 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.kinesis; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kinesis.KinesisClient; +import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest; +import software.amazon.awssdk.services.kinesis.model.PutRecordRequest; +import software.amazon.awssdk.services.kinesis.model.PutRecordResponse; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.TimeUnit; + +import org.testcontainers.containers.localstack.LocalStackContainer; +import org.testcontainers.utility.DockerImageName; + +/** + * Base test class for kinesis ingestion tests + */ +@ThreadLeakFilters(filters = TestContainerThreadLeakFilter.class) +public class KinesisIngestionBaseIT extends OpenSearchIntegTestCase { + static final String streamName = "test"; + static final String indexName = "testindex"; + static final String mapping = "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}"; + static final long defaultMessageTimestamp = 1739459500000L; + + protected LocalStackContainer localstack; + protected KinesisClient kinesisClient; + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(KinesisPlugin.class); + } + + @Before + protected void setup() throws InterruptedException { + setupKinesis(); + } + + @After + protected void cleanup() { + stopKinesis(); + } + + private void setupKinesis() throws InterruptedException { + localstack = new LocalStackContainer(DockerImageName.parse("localstack/localstack:latest")).withServices( + LocalStackContainer.Service.KINESIS + ); + localstack.start(); + + // Initialize AWS Kinesis Client with LocalStack endpoint + kinesisClient = KinesisClient.builder() + .endpointOverride(localstack.getEndpointOverride(LocalStackContainer.Service.KINESIS)) + .region(Region.of(localstack.getRegion())) + .credentialsProvider( + StaticCredentialsProvider.create(AwsBasicCredentials.create(localstack.getAccessKey(), localstack.getSecretKey())) + ) + .build(); + + // Create a stream + kinesisClient.createStream(CreateStreamRequest.builder().streamName(streamName).shardCount(1).build()); + + // sleep for a while to allow the stream to be created + Thread.sleep(500); + } + + private void stopKinesis() { + if (kinesisClient != null) { + kinesisClient.close(); + } + + if (localstack != null) { + localstack.stop(); + localstack = null; + } + } + + protected String produceData(String id, String name, String age) { + return produceData(id, name, age, defaultMessageTimestamp); + } + + protected String produceData(String id, String name, String age, long timestamp) { + String payload = String.format( + Locale.ROOT, + "{\"_id\":\"%s\", \"_op_type\":\"index\",\"_source\":{\"name\":\"%s\", \"age\": %s}}", + id, + name, + age + ); + + PutRecordResponse response = kinesisClient.putRecord( + PutRecordRequest.builder().streamName(streamName).data(SdkBytes.fromUtf8String(payload)).partitionKey(id).build() + ); + + return response.sequenceNumber(); + } + + 
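+    // Illustration only (argument values are made up): produceData("1", "alice", "30") publishes
+    // the UTF-8 JSON payload below to the stream, which the ingestion tests then expect to show
+    // up as an indexed document with id "1":
+    //
+    //   {"_id":"1", "_op_type":"index","_source":{"name":"alice", "age": 30}}
+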
protected void waitForSearchableDocs(long docCount, List nodes) throws Exception { + assertBusy(() -> { + for (String node : nodes) { + final SearchResponse response = client(node).prepareSearch(indexName).setSize(0).setPreference("_only_local").get(); + final long hits = response.getHits().getTotalHits().value(); + if (hits < docCount) { + fail("Expected search hits on node: " + node + " to be at least " + docCount + " but was: " + hits); + } + } + }, 1, TimeUnit.MINUTES); + } +} diff --git a/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/TestContainerThreadLeakFilter.java b/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/TestContainerThreadLeakFilter.java new file mode 100644 index 0000000000000..38b9773f4864b --- /dev/null +++ b/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/TestContainerThreadLeakFilter.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kinesis; + +import com.carrotsearch.randomizedtesting.ThreadFilter; + +/** + * The {@link org.testcontainers.images.TimeLimitedLoggedPullImageResultCallback} instance used by test containers, + * creates a watcher daemon thread which is never + * stopped. This filter excludes that thread from the thread leak detection logic. It also excludes ryuk resource reaper + * thread aws IdleConnectionReaper thread, which are not closed on time . + */ +public final class TestContainerThreadLeakFilter implements ThreadFilter { + @Override + public boolean reject(Thread t) { + return t.getName().startsWith("testcontainers-pull-watchdog-") + || t.getName().startsWith("testcontainers-ryuk") + || t.getName().startsWith("idle-connection-reaper"); + } +} diff --git a/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisConsumerFactory.java b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisConsumerFactory.java new file mode 100644 index 0000000000000..a637e53b2a86e --- /dev/null +++ b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisConsumerFactory.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kinesis; + +import org.opensearch.index.IngestionConsumerFactory; + +import java.util.Map; + +/** + * Factory for creating Kinesis consumers + */ +public class KinesisConsumerFactory implements IngestionConsumerFactory { + + /** + * Configuration for the Kinesis source + */ + protected KinesisSourceConfig config; + + /** + * Constructor. 
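+     * The factory is not usable until {@link #initialize(Map)} has populated {@link #config}.
+     * A minimal usage sketch (client id, shard id and credential values are illustrative only;
+     * note that creating the consumer immediately describes the stream, so reachable
+     * credentials/endpoint are required):
+     * <pre>
+     *   KinesisConsumerFactory factory = new KinesisConsumerFactory();
+     *   factory.initialize(Map.of(
+     *       "region", "us-west-2",
+     *       "stream", "test",
+     *       "access_key", "...",
+     *       "secret_key", "..."));
+     *   KinesisShardConsumer consumer = factory.createShardConsumer("client-1", 0);
+     * </pre>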
+ */ + public KinesisConsumerFactory() {} + + @Override + public void initialize(Map params) { + config = new KinesisSourceConfig(params); + } + + @Override + public KinesisShardConsumer createShardConsumer(String clientId, int shardId) { + assert config != null; + return new KinesisShardConsumer(clientId, config, shardId); + } + + @Override + public SequenceNumber parsePointerFromString(String pointer) { + return new SequenceNumber(pointer); + } +} diff --git a/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisMessage.java b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisMessage.java new file mode 100644 index 0000000000000..f08eab4f5920b --- /dev/null +++ b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisMessage.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kinesis; + +import org.opensearch.index.Message; + +/** + * Kinesis message + */ +public class KinesisMessage implements Message { + private final byte[] payload; + + /** + * Constructor + * @param payload the payload of the message + */ + public KinesisMessage(byte[] payload) { + this.payload = payload; + } + + @Override + public byte[] getPayload() { + return payload; + } +} diff --git a/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisPlugin.java b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisPlugin.java new file mode 100644 index 0000000000000..3f4937886cc98 --- /dev/null +++ b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisPlugin.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kinesis; + +import org.opensearch.index.IngestionConsumerFactory; +import org.opensearch.plugins.IngestionConsumerPlugin; +import org.opensearch.plugins.Plugin; + +import java.util.Map; + +/** + * A plugin for ingestion source of Kinesis. + */ +public class KinesisPlugin extends Plugin implements IngestionConsumerPlugin { + /** + * The type of the ingestion source. + */ + public static final String TYPE = "KINESIS"; + + /** + * Constructor. + */ + public KinesisPlugin() {} + + @SuppressWarnings("rawtypes") + @Override + public Map getIngestionConsumerFactories() { + return Map.of(TYPE, new KinesisConsumerFactory()); + } + + @Override + public String getType() { + return TYPE; + } +} diff --git a/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisShardConsumer.java b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisShardConsumer.java new file mode 100644 index 0000000000000..aa745eb057d74 --- /dev/null +++ b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisShardConsumer.java @@ -0,0 +1,263 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
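+ *
+ * Read-path sketch (the sequence number and limits below are illustrative only): a positioned
+ * read resolves a shard iterator for the requested sequence number and then issues GetRecords;
+ * the no-pointer overload continues from the nextShardIterator returned by the previous call.
+ *
+ *   consumer.readNext(new SequenceNumber("4959033827149025660855969253836157109592"), true, 100, 1000);
+ *   consumer.readNext(100, 1000); // continues from lastShardIterator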
+ */ + +package org.opensearch.plugin.kinesis; + +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kinesis.KinesisClient; +import software.amazon.awssdk.services.kinesis.KinesisClientBuilder; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse; +import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; +import software.amazon.awssdk.services.kinesis.model.Record; +import software.amazon.awssdk.services.kinesis.model.ShardIteratorType; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.IngestionShardConsumer; +import org.opensearch.index.IngestionShardPointer; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeoutException; + +import static org.opensearch.plugin.kinesis.SequenceNumber.NON_EXISTING_SEQUENCE_NUMBER; +import static software.amazon.awssdk.auth.credentials.AwsBasicCredentials.create; + +/** + * Kinesis consumer to read messages from a Kinesis shard + */ +@SuppressWarnings("removal") +public class KinesisShardConsumer implements IngestionShardConsumer { + private static final Logger logger = LogManager.getLogger(KinesisShardConsumer.class); + + /** + * The Kinesis consumer + */ + ; + private KinesisClient kinesisClient; + private String lastShardIterator; + final String clientId; + final String kinesisShardId; + final int shardId; + final KinesisSourceConfig config; + + /** + * Constructor + * @param clientId the client id + * @param config the kinesis source config + * @param shardId the shard id + */ + public KinesisShardConsumer(String clientId, KinesisSourceConfig config, int shardId) { + this(clientId, config, shardId, createClient(clientId, config)); + } + + /** + * Constructor, visible for testing + * @param clientId the client id + * @param config the Kinesis source config + * @param shardId the shard id + * @param kinesisClient the created kinesis client + */ + protected KinesisShardConsumer(String clientId, KinesisSourceConfig config, int shardId, KinesisClient kinesisClient) { + this.clientId = clientId; + this.kinesisClient = kinesisClient; + this.shardId = shardId; + this.config = config; + + // Get shard iterator + DescribeStreamResponse describeStreamResponse = kinesisClient.describeStream( + DescribeStreamRequest.builder().streamName(config.getStream()).build() + ); + + if (shardId >= describeStreamResponse.streamDescription().shards().size()) { + throw new IllegalArgumentException("Shard id " + shardId + " does not exist in stream " + config.getStream()); + } + + String kinesisShardId = describeStreamResponse.streamDescription().shards().get(shardId).shardId(); + this.kinesisShardId = kinesisShardId; + logger.info("kinesis consumer created for stream {} shard {}", config.getStream(), shardId); + } + + /** + * Create a Kinesis consumer. 
visible for testing + * @param clientId the client id + * @param config the Kinesis source config + * @return the Kinesis consumer + */ + protected static KinesisClient createClient(String clientId, KinesisSourceConfig config) { + + KinesisClientBuilder kinesisClientBuilder = KinesisClient.builder() + .region(Region.of(config.getRegion())) + // TODO: better security config + .credentialsProvider(StaticCredentialsProvider.create(create(config.getAccessKey(), config.getSecretKey()))); + + if (config.getEndpointOverride() != null && !config.getEndpointOverride().isEmpty()) { + try { + kinesisClientBuilder = kinesisClientBuilder.endpointOverride(new URI(config.getEndpointOverride())); + } catch (URISyntaxException e) { + throw new RuntimeException("Invalid endpoint override: " + config.getEndpointOverride(), e); + } + } + + return kinesisClientBuilder.build(); + } + + @Override + public List> readNext( + SequenceNumber sequenceNumber, + boolean includeStart, + long maxMessages, + int timeoutMillis + ) throws TimeoutException { + List> records = fetch( + null, + sequenceNumber.getSequenceNumber(), + includeStart, + maxMessages, + timeoutMillis + ); + return records; + } + + @Override + public List> readNext(long maxMessages, int timeoutMillis) throws TimeoutException { + if (lastShardIterator == null) { + throw new IllegalStateException("No shard iterator available"); + } + return fetch(lastShardIterator, null, false, maxMessages, timeoutMillis); + } + + @Override + public IngestionShardPointer earliestPointer() { + return getSequenceNumber(ShardIteratorType.TRIM_HORIZON, null, 0); + } + + @Override + public IngestionShardPointer latestPointer() { + return getSequenceNumber(ShardIteratorType.LATEST, null, 0); + } + + private List fetchRecords( + String shardIterator, + ShardIteratorType shardIteratorType, + String startingSequenceNumber, + long timestampMillis, + int limit + ) { + String shardIteratorToUse = shardIterator; + + if (shardIterator == null) { + // fetch the shard iterator + GetShardIteratorRequest.Builder builder = GetShardIteratorRequest.builder() + .streamName(config.getStream()) + .shardId(kinesisShardId) + .shardIteratorType(shardIteratorType); + + if (startingSequenceNumber != null) { + builder = builder.startingSequenceNumber(startingSequenceNumber); + } + + if (timestampMillis != 0) { + builder = builder.timestamp(Instant.ofEpochMilli(timestampMillis)); + } + + GetShardIteratorRequest shardIteratorRequest = builder.build(); + + GetShardIteratorResponse shardIteratorResponse = kinesisClient.getShardIterator(shardIteratorRequest); + shardIteratorToUse = shardIteratorResponse.shardIterator(); + } + + if (shardIteratorToUse == null) { + return new ArrayList<>(); + } + + // Fetch the next records + GetRecordsRequest recordsRequest = GetRecordsRequest.builder().shardIterator(shardIteratorToUse).limit(limit).build(); + GetRecordsResponse recordsResponse = kinesisClient.getRecords(recordsRequest); + lastShardIterator = recordsResponse.nextShardIterator(); + List records = recordsResponse.records(); + return records; + } + + private SequenceNumber getSequenceNumber(ShardIteratorType shardIteratorType, String startingSequenceNumber, long timestampMillis) { + List records = fetchRecords(null, shardIteratorType, startingSequenceNumber, timestampMillis, 1); + + if (!records.isEmpty()) { + Record nextRecord = records.get(0); + return new SequenceNumber(nextRecord.sequenceNumber()); + } else { + return NON_EXISTING_SEQUENCE_NUMBER; + } + } + + @Override + public IngestionShardPointer 
pointerFromTimestampMillis(long timestampMillis) { + // TODO: support auto config + return getSequenceNumber(ShardIteratorType.AT_TIMESTAMP, null, timestampMillis); + } + + @Override + public IngestionShardPointer pointerFromOffset(String offset) { + return new SequenceNumber(offset); + } + + private synchronized List> fetch( + String shardIterator, + String sequenceNumber, + boolean includeStart, + long maxMessages, + int timeoutMillis + ) { + + // Prepare the get records request with the shardIterator + long limit = Math.min(maxMessages, 10000); // kinesis supports 10000 as upper limit + + ShardIteratorType iteratorType = includeStart ? ShardIteratorType.AT_SEQUENCE_NUMBER : ShardIteratorType.AFTER_SEQUENCE_NUMBER; + + List records = fetchRecords(shardIterator, iteratorType, sequenceNumber, 0, (int) limit); + + List> results = new ArrayList<>(); + + for (Record record : records) { + SequenceNumber sequenceNumber1 = new SequenceNumber(record.sequenceNumber()); + KinesisMessage message = new KinesisMessage(record.data().asByteArray()); + results.add(new ReadResult<>(sequenceNumber1, message)); + } + + return results; + } + + @Override + public int getShardId() { + return shardId; + } + + @Override + public void close() throws IOException { + if (kinesisClient != null) { + kinesisClient.close(); + } + } + + /** + * Get the client id + * @return the client id + */ + public String getClientId() { + return clientId; + } +} diff --git a/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisSourceConfig.java b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisSourceConfig.java new file mode 100644 index 0000000000000..5c5bcc8bb3e46 --- /dev/null +++ b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/KinesisSourceConfig.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kinesis; + +import org.opensearch.core.util.ConfigurationUtils; + +import java.util.Map; + +/** + * Class encapsulating the configuration of a Kafka source. 
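+ *
+ * <p>A sketch of how these properties are typically supplied (the {@code ingestion_source.*}
+ * index-setting names are taken from the integration tests in this change; the concrete values
+ * are illustrative only):
+ * <pre>
+ *   "ingestion_source.type"                    : "kinesis"
+ *   "ingestion_source.pointer.init.reset"      : "earliest"
+ *   "ingestion_source.param.region"            : "us-west-2"
+ *   "ingestion_source.param.stream"            : "test"
+ *   "ingestion_source.param.access_key"        : "..."
+ *   "ingestion_source.param.secret_key"        : "..."
+ *   "ingestion_source.param.endpoint_override" : ""   (optional; defaults to empty)
+ * </pre>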
+ */ +public class KinesisSourceConfig { + private final String PROP_REGION = "region"; + private final String PROP_STREAM = "stream"; + private final String PROP_ACCESS_KEY = "access_key"; + private final String PROP_SECRET_KEY = "secret_key"; + private final String PROP_ENDPOINT_OVERRIDE = "endpoint_override"; + + private final String region; + private final String stream; + private final String accessKey; + private final String secretKey; + private final String endpointOverride; + + /** + * Constructor + * @param params the configuration parameters + */ + public KinesisSourceConfig(Map params) { + this.region = ConfigurationUtils.readStringProperty(params, PROP_REGION); + this.stream = ConfigurationUtils.readStringProperty(params, PROP_STREAM); + this.accessKey = ConfigurationUtils.readStringProperty(params, PROP_ACCESS_KEY); + this.secretKey = ConfigurationUtils.readStringProperty(params, PROP_SECRET_KEY); + this.endpointOverride = ConfigurationUtils.readStringProperty(params, PROP_ENDPOINT_OVERRIDE, ""); + } + + /** + * Get the stream name + * @return the topic name + */ + public String getStream() { + return stream; + } + + /** + * Get the region + * @return the region + */ + public String getRegion() { + return region; + } + + /** + * Get the access key + * @return the access key + */ + public String getAccessKey() { + return accessKey; + } + + /** + * Get the secret key + * @return the secret key + */ + public String getSecretKey() { + return secretKey; + } + + /** + * Get the endpoint override + * @return the endpoint override + */ + public String getEndpointOverride() { + return endpointOverride; + } + +} diff --git a/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/SequenceNumber.java b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/SequenceNumber.java new file mode 100644 index 0000000000000..9a379627ac589 --- /dev/null +++ b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/SequenceNumber.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kinesis; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermRangeQuery; +import org.opensearch.index.IngestionShardPointer; + +import java.nio.charset.StandardCharsets; + +/** + * Kinesis sequence number. 
+ */ +public class SequenceNumber implements IngestionShardPointer { + + private final String sequenceNumber; + /** constant denoting non-existing sequence number */ + public static final SequenceNumber NON_EXISTING_SEQUENCE_NUMBER = new SequenceNumber("non-existing-sequence-number"); + + /** + * Constructor + * + * @param sequenceNumber the sequence number + */ + public SequenceNumber(String sequenceNumber) { + this.sequenceNumber = sequenceNumber; + } + + /** + * Get the sequence number + * + * @return the sequence number + */ + public String getSequenceNumber() { + return sequenceNumber; + } + + @Override + public byte[] serialize() { + return sequenceNumber.getBytes(StandardCharsets.UTF_8); + } + + @Override + public String asString() { + return sequenceNumber; + } + + @Override + public Field asPointField(String fieldName) { + return new KeywordField(fieldName, sequenceNumber, KeywordField.Store.YES); + } + + @Override + public Query newRangeQueryGreaterThan(String fieldName) { + return TermRangeQuery.newStringRange(fieldName, sequenceNumber, null, false, true); + } + + @Override + public String toString() { + return "SequenceNumber{" + "sequenceNumber=" + sequenceNumber + '}'; + } + + @Override + public int compareTo(IngestionShardPointer o) { + if (o == null) { + throw new IllegalArgumentException("the pointer is null"); + } + if (!(o instanceof SequenceNumber)) { + throw new IllegalArgumentException("the pointer is of type " + o.getClass() + " and not SequenceNumber"); + } + SequenceNumber other = (SequenceNumber) o; + return sequenceNumber.compareTo(other.sequenceNumber); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SequenceNumber that = (SequenceNumber) o; + return sequenceNumber == that.sequenceNumber; + } + + @Override + public int hashCode() { + return sequenceNumber.hashCode(); + } +} diff --git a/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/package-info.java b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/package-info.java new file mode 100644 index 0000000000000..7842a34a8ff9f --- /dev/null +++ b/plugins/ingestion-kinesis/src/main/java/org/opensearch/plugin/kinesis/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** package for kinesis plugin */ +package org.opensearch.plugin.kinesis; diff --git a/plugins/ingestion-kinesis/src/main/plugin-metadata/plugin-security.policy b/plugins/ingestion-kinesis/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..87efe79dc46c3 --- /dev/null +++ b/plugins/ingestion-kinesis/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +grant { + // Allow host/ip name service lookups + permission java.net.SocketPermission "*", "connect"; + permission java.net.SocketPermission "*", "resolve"; +}; diff --git a/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisConsumerFactoryTests.java b/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisConsumerFactoryTests.java new file mode 100644 index 0000000000000..d7b51445e9afa --- /dev/null +++ b/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisConsumerFactoryTests.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kinesis; + +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; + +import java.util.HashMap; +import java.util.Map; + +public class KinesisConsumerFactoryTests extends OpenSearchTestCase { + public void testConstructorAndGetters() { + KinesisConsumerFactory factory = new KinesisConsumerFactory(); + Assert.assertNull("Config should be null before initialization", factory.config); + } + + public void testInitializeWithValidParams() { + KinesisConsumerFactory factory = new KinesisConsumerFactory(); + Map params = new HashMap<>(); + params.put("region", "us-west-2"); + params.put("stream", "testStream"); + params.put("secret_key", "testSecretKey"); + params.put("access_key", "testAccessKey"); + + factory.initialize(params); + + Assert.assertNotNull("Config should be initialized", factory.config); + Assert.assertEquals("Region should be correctly initialized", "us-west-2", factory.config.getRegion()); + Assert.assertEquals("Stream should be correctly initialized", "testStream", factory.config.getStream()); + } + + public void testInitializeWithNullParams() { + KinesisConsumerFactory factory = new KinesisConsumerFactory(); + try { + factory.initialize(null); + Assert.fail("Initialization should throw an exception when params is null"); + } catch (NullPointerException e) { + Assert.assertEquals("Cannot invoke \"java.util.Map.get(Object)\" because \"configuration\" is null", e.getMessage()); + } + } + + public void testParsePointerFromString() { + KinesisConsumerFactory factory = new KinesisConsumerFactory(); + SequenceNumber sequenceNumber = factory.parsePointerFromString("12345"); + + Assert.assertNotNull("Sequence number should be parsed", sequenceNumber); + Assert.assertEquals("Sequence number should be correctly parsed", "12345", sequenceNumber.getSequenceNumber()); + } +} diff --git a/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisMessageTests.java b/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisMessageTests.java new file mode 100644 index 0000000000000..c6f0cee3da922 --- /dev/null +++ b/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisMessageTests.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.kinesis; + +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; + +public class KinesisMessageTests extends OpenSearchTestCase { + public void testConstructorAndGetters() { + byte[] payload = { 1, 2, 3 }; + KinesisMessage message = new KinesisMessage(payload); + + Assert.assertArrayEquals("Payload should be correctly initialized and returned", payload, message.getPayload()); + } + + public void testConstructorWithNullPayload() { + KinesisMessage message = new KinesisMessage(null); + + Assert.assertNull("Payload should be null", message.getPayload()); + } +} diff --git a/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisShardConsumerTests.java b/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisShardConsumerTests.java new file mode 100644 index 0000000000000..441583061491c --- /dev/null +++ b/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisShardConsumerTests.java @@ -0,0 +1,200 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kinesis; + +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.services.kinesis.KinesisClient; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse; +import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; +import software.amazon.awssdk.services.kinesis.model.Record; +import software.amazon.awssdk.services.kinesis.model.Shard; +import software.amazon.awssdk.services.kinesis.model.StreamDescription; + +import org.opensearch.index.IngestionShardConsumer; +import org.opensearch.index.IngestionShardPointer; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeoutException; + +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class KinesisShardConsumerTests extends OpenSearchTestCase { + private KinesisClient mockKinesisClient; + private KinesisSourceConfig config; + + @Before + public void setUp() throws Exception { + super.setUp(); + mockKinesisClient = Mockito.mock(KinesisClient.class); + Map<String, Object> params = new HashMap<>(); + params.put("region", "us-west-2"); + params.put("stream", "testStream"); + params.put("access_key", "testAccessKey"); + params.put("secret_key", "testSecretKey"); + params.put("endpoint_override", "testEndpoint"); + config = new KinesisSourceConfig(params); + } + + public void testConstructorAndGetters() { + DescribeStreamResponse describeStreamResponse = DescribeStreamResponse.builder() + .streamDescription(StreamDescription.builder().shards(Shard.builder().shardId("shardId-0").build()).build()) + .build(); + 
when(mockKinesisClient.describeStream(any(DescribeStreamRequest.class))).thenReturn(describeStreamResponse); + + KinesisShardConsumer consumer = new KinesisShardConsumer("clientId", config, 0, mockKinesisClient); + + Assert.assertEquals("clientId", consumer.getClientId()); + Assert.assertEquals(0, consumer.getShardId()); + } + + public void testConstructorWithInvalidShardId() { + DescribeStreamResponse describeStreamResponse = DescribeStreamResponse.builder() + .streamDescription( + StreamDescription.builder() + .shards(Collections.emptyList()) // No shards in the stream + .build() + ) + .build(); + when(mockKinesisClient.describeStream(any(DescribeStreamRequest.class))).thenReturn(describeStreamResponse); + + try { + new KinesisShardConsumer("clientId", config, 0, mockKinesisClient); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + Assert.assertEquals("Shard id 0 does not exist in stream testStream", e.getMessage()); + } + } + + public void testReadNext() throws TimeoutException { + DescribeStreamResponse describeStreamResponse = DescribeStreamResponse.builder() + .streamDescription(StreamDescription.builder().shards(Shard.builder().shardId("shardId-0").build()).build()) + .build(); + when(mockKinesisClient.describeStream(any(DescribeStreamRequest.class))).thenReturn(describeStreamResponse); + + GetShardIteratorResponse getShardIteratorResponse = GetShardIteratorResponse.builder().shardIterator("shardIterator").build(); + when(mockKinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(getShardIteratorResponse); + + GetRecordsResponse getRecordsResponse = GetRecordsResponse.builder() + .records(Record.builder().sequenceNumber("12345").data(SdkBytes.fromByteArray(new byte[] { 1, 2, 3 })).build()) + .build(); + when(mockKinesisClient.getRecords(any(GetRecordsRequest.class))).thenReturn(getRecordsResponse); + + KinesisShardConsumer consumer = new KinesisShardConsumer("clientId", config, 0, mockKinesisClient); + List<IngestionShardConsumer.ReadResult<SequenceNumber, KinesisMessage>> results = consumer.readNext( + new SequenceNumber("12345"), + true, + 10, + 1000 + ); + + Assert.assertEquals(1, results.size()); + Assert.assertEquals("12345", results.get(0).getPointer().getSequenceNumber()); + } + + public void testEarliestPointer() { + DescribeStreamResponse describeStreamResponse = DescribeStreamResponse.builder() + .streamDescription(StreamDescription.builder().shards(Shard.builder().shardId("shardId-0").build()).build()) + .build(); + when(mockKinesisClient.describeStream(any(DescribeStreamRequest.class))).thenReturn(describeStreamResponse); + + GetShardIteratorResponse getShardIteratorResponse = GetShardIteratorResponse.builder().shardIterator("shardIterator").build(); + when(mockKinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(getShardIteratorResponse); + + GetRecordsResponse getRecordsResponse = GetRecordsResponse.builder() + .records(Record.builder().sequenceNumber("12345").build()) + .build(); + when(mockKinesisClient.getRecords(any(GetRecordsRequest.class))).thenReturn(getRecordsResponse); + + KinesisShardConsumer consumer = new KinesisShardConsumer("clientId", config, 0, mockKinesisClient); + IngestionShardPointer pointer = consumer.earliestPointer(); + + Assert.assertEquals("12345", ((SequenceNumber) pointer).getSequenceNumber()); + } + + public void testLatestPointer() { + DescribeStreamResponse describeStreamResponse = DescribeStreamResponse.builder() + 
.streamDescription(StreamDescription.builder().shards(Shard.builder().shardId("shardId-0").build()).build()) + .build(); + when(mockKinesisClient.describeStream(any(DescribeStreamRequest.class))).thenReturn(describeStreamResponse); + + GetShardIteratorResponse getShardIteratorResponse = GetShardIteratorResponse.builder().shardIterator("shardIterator").build(); + when(mockKinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(getShardIteratorResponse); + + GetRecordsResponse getRecordsResponse = GetRecordsResponse.builder() + .records(Record.builder().sequenceNumber("12345").build()) + .build(); + when(mockKinesisClient.getRecords(any(GetRecordsRequest.class))).thenReturn(getRecordsResponse); + + KinesisShardConsumer consumer = new KinesisShardConsumer("clientId", config, 0, mockKinesisClient); + IngestionShardPointer pointer = consumer.latestPointer(); + + Assert.assertEquals("12345", ((SequenceNumber) pointer).getSequenceNumber()); + } + + public void testPointerFromTimestampMillis() { + DescribeStreamResponse describeStreamResponse = DescribeStreamResponse.builder() + .streamDescription(StreamDescription.builder().shards(Shard.builder().shardId("shardId-0").build()).build()) + .build(); + when(mockKinesisClient.describeStream(any(DescribeStreamRequest.class))).thenReturn(describeStreamResponse); + + GetShardIteratorResponse getShardIteratorResponse = GetShardIteratorResponse.builder().shardIterator("shardIterator").build(); + when(mockKinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(getShardIteratorResponse); + + GetRecordsResponse getRecordsResponse = GetRecordsResponse.builder() + .records(Record.builder().sequenceNumber("12345").build()) + .build(); + when(mockKinesisClient.getRecords(any(GetRecordsRequest.class))).thenReturn(getRecordsResponse); + + KinesisShardConsumer consumer = new KinesisShardConsumer("clientId", config, 0, mockKinesisClient); + IngestionShardPointer pointer = consumer.pointerFromTimestampMillis(1234567890L); + + Assert.assertEquals("12345", ((SequenceNumber) pointer).getSequenceNumber()); + } + + public void testPointerFromOffset() { + DescribeStreamResponse describeStreamResponse = DescribeStreamResponse.builder() + .streamDescription(StreamDescription.builder().shards(Shard.builder().shardId("shardId-0").build()).build()) + .build(); + when(mockKinesisClient.describeStream(any(DescribeStreamRequest.class))).thenReturn(describeStreamResponse); + + KinesisShardConsumer consumer = new KinesisShardConsumer("clientId", config, 0, mockKinesisClient); + IngestionShardPointer pointer = consumer.pointerFromOffset("12345"); + + Assert.assertEquals("12345", ((SequenceNumber) pointer).getSequenceNumber()); + } + + public void testClose() throws IOException { + DescribeStreamResponse describeStreamResponse = DescribeStreamResponse.builder() + .streamDescription(StreamDescription.builder().shards(Shard.builder().shardId("shardId-0").build()).build()) + .build(); + when(mockKinesisClient.describeStream(any(DescribeStreamRequest.class))).thenReturn(describeStreamResponse); + + KinesisShardConsumer consumer = new KinesisShardConsumer("clientId", config, 0, mockKinesisClient); + consumer.close(); + + verify(mockKinesisClient, times(1)).close(); + } + +} diff --git a/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisSourceConfigTests.java b/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisSourceConfigTests.java new file mode 100644 index 0000000000000..9f0efc1c1788b --- 
/dev/null +++ b/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/KinesisSourceConfigTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kinesis; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class KinesisSourceConfigTests extends OpenSearchTestCase { + public void testConstructorAndGetters() { + Map<String, Object> params = new HashMap<>(); + params.put("region", "us-west-2"); + params.put("stream", "testStream"); + params.put("access_key", "testAccessKey"); + params.put("secret_key", "testSecretKey"); + params.put("endpoint_override", "testEndpoint"); + + KinesisSourceConfig config = new KinesisSourceConfig(params); + + Assert.assertEquals("Region should be correctly initialized and returned", "us-west-2", config.getRegion()); + Assert.assertEquals("Stream should be correctly initialized and returned", "testStream", config.getStream()); + Assert.assertEquals("Access key should be correctly initialized and returned", "testAccessKey", config.getAccessKey()); + Assert.assertEquals("Secret key should be correctly initialized and returned", "testSecretKey", config.getSecretKey()); + Assert.assertEquals("Endpoint override should be correctly initialized and returned", "testEndpoint", config.getEndpointOverride()); + } + + public void testConstructorFails() { + try { + new KinesisSourceConfig(null); + Assert.fail("Constructor should throw an exception when params is null"); + } catch (NullPointerException e) { + Assert.assertEquals("Cannot invoke \"java.util.Map.get(Object)\" because \"configuration\" is null", e.getMessage()); + } + + try { + new KinesisSourceConfig(Collections.emptyMap()); + Assert.fail("Constructor should throw an exception when params is empty"); + } catch (OpenSearchParseException e) { + Assert.assertEquals("[region] required property is missing", e.getMessage()); + } + } +} diff --git a/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/SequenceNumberTests.java b/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/SequenceNumberTests.java new file mode 100644 index 0000000000000..e74d6d3d73eb9 --- /dev/null +++ b/plugins/ingestion-kinesis/src/test/java/org/opensearch/plugin/kinesis/SequenceNumberTests.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.kinesis; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermRangeQuery; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; + +import java.nio.charset.StandardCharsets; + +public class SequenceNumberTests extends OpenSearchTestCase { + public void testConstructorAndGetters() { + String sequenceNumber = "12345"; + SequenceNumber seqNum = new SequenceNumber(sequenceNumber); + + Assert.assertEquals("The sequence number should be correctly initialized and returned", sequenceNumber, seqNum.getSequenceNumber()); + } + + public void testEqualsAndHashCode() { + String sequenceNumber1 = "12345"; + String sequenceNumber2 = "67890"; + SequenceNumber seqNum1 = new SequenceNumber(sequenceNumber1); + SequenceNumber seqNum2 = new SequenceNumber(sequenceNumber1); + SequenceNumber seqNum3 = new SequenceNumber(sequenceNumber2); + + Assert.assertTrue("Sequence numbers with the same value should be equal", seqNum1.equals(seqNum2)); + Assert.assertFalse("Sequence numbers with different values should not be equal", seqNum1.equals(seqNum3)); + Assert.assertEquals("Hash codes for equal sequence numbers should be the same", seqNum1.hashCode(), seqNum2.hashCode()); + Assert.assertNotEquals("Hash codes for different sequence numbers should not be the same", seqNum1.hashCode(), seqNum3.hashCode()); + } + + public void testSerialize() { + String sequenceNumber = "12345"; + SequenceNumber seqNum = new SequenceNumber(sequenceNumber); + byte[] expectedBytes = sequenceNumber.getBytes(StandardCharsets.UTF_8); + + Assert.assertArrayEquals("The serialized bytes should be correct", expectedBytes, seqNum.serialize()); + } + + public void testAsString() { + String sequenceNumber = "12345"; + SequenceNumber seqNum = new SequenceNumber(sequenceNumber); + + Assert.assertEquals("The string representation should be correct", sequenceNumber, seqNum.asString()); + } + + public void testCompareTo() { + String sequenceNumber1 = "12345"; + String sequenceNumber2 = "67890"; + SequenceNumber seqNum1 = new SequenceNumber(sequenceNumber1); + SequenceNumber seqNum2 = new SequenceNumber(sequenceNumber2); + + Assert.assertTrue("The comparison should be correct", seqNum1.compareTo(seqNum2) < 0); + Assert.assertTrue("The comparison should be correct", seqNum2.compareTo(seqNum1) > 0); + Assert.assertTrue("The comparison should be correct", seqNum1.compareTo(seqNum1) == 0); + } + + public void testAsPointField() { + String sequenceNumber = "12345"; + SequenceNumber seqNum = new SequenceNumber(sequenceNumber); + Field field = seqNum.asPointField("sequenceNumberField"); + + Assert.assertTrue("The field should be an instance of KeywordField", field instanceof KeywordField); + Assert.assertEquals("The field name should be correct", "sequenceNumberField", field.name()); + Assert.assertEquals("The field value should be correct", sequenceNumber, field.stringValue()); + } + + public void testNewRangeQueryGreaterThan() { + String sequenceNumber = "12345"; + SequenceNumber seqNum = new SequenceNumber(sequenceNumber); + Query query = seqNum.newRangeQueryGreaterThan("sequenceNumberField"); + + Assert.assertTrue("The query should be an instance of TermRangeQuery", query instanceof TermRangeQuery); + TermRangeQuery termRangeQuery = (TermRangeQuery) query; + Assert.assertEquals("The field name should be correct", "sequenceNumberField", termRangeQuery.getField()); + 
Assert.assertEquals("The lower term should be correct", sequenceNumber, termRangeQuery.getLowerTerm().utf8ToString()); + Assert.assertNull("The upper term should be null", termRangeQuery.getUpperTerm()); + Assert.assertFalse("The lower term should be exclusive", termRangeQuery.includesLower()); + Assert.assertTrue("The upper term should be inclusive", termRangeQuery.includesUpper()); + } +} diff --git a/release-notes/opensearch.release-notes-3.0.0-alpha1.md b/release-notes/opensearch.release-notes-3.0.0-alpha1.md index 405d28f755ac3..6f94cfe19e29d 100644 --- a/release-notes/opensearch.release-notes-3.0.0-alpha1.md +++ b/release-notes/opensearch.release-notes-3.0.0-alpha1.md @@ -56,7 +56,6 @@ - Implemented computation of segment replication stats at shard level ([#17055](https://github.com/opensearch-project/OpenSearch/pull/17055)) - [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) - ### Dependencies - Bump Apache HttpCore5/HttpClient5 dependencies from 5.2.5/5.3.1 to 5.3.1/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757)) - Bumps `jetty` version from 9.4.55.v20240627 to 9.4.57.v20241219 ([#17395](https://github.com/opensearch-project/OpenSearch/pull/17395)) diff --git a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java index a9ffcaca850f2..e55b4f59b61fa 100644 --- a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java +++ b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java @@ -58,24 +58,26 @@ public M getMessage() { /** * Read the next set of messages from the source - * @param pointer the pointer to start reading from, inclusive + * @param pointer the pointer to start reading from + * @param includeStart whether to include the start pointer in the read * @param maxMessages, the maximum number of messages to read, or -1 for no limit * @param timeoutMillis the maximum time to wait for messages * @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis} * milliseconds * @return a list of messages read from the source */ - List<ReadResult<T, M>> readNext(T pointer, long maxMessages, int timeoutMillis) throws java.util.concurrent.TimeoutException; + List<ReadResult<T, M>> readNext(T pointer, boolean includeStart, long maxMessages, int timeoutMillis) + throws java.util.concurrent.TimeoutException; /** - * @return the next pointer to read from - */ - T nextPointer(); - - /** - * @return the immediate next pointer from the provided start pointer + * Read the next set of messages from the source using the previous pointer. An exception is thrown if no previous pointer is available. + * This method is used as an optimization for consecutive reads.
+ * @param maxMessages the maximum number of messages to read, or -1 for no limit + * @param timeoutMillis the maximum time to wait for messages + * @return a list of messages read from the source + * @throws java.util.concurrent.TimeoutException if the operation could not be completed within {@code timeoutMillis} milliseconds */ - T nextPointer(T startPointer); + List<ReadResult<T, M>> readNext(long maxMessages, int timeoutMillis) throws java.util.concurrent.TimeoutException; /** * @return the earliest pointer in the shard diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index b35873845165a..23c49d359b25e 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -222,6 +222,8 @@ protected void commitIndexWriter(final IndexWriter writer, final String translog */ if (streamPoller.getBatchStartPointer() != null) { commitData.put(StreamPoller.BATCH_START, streamPoller.getBatchStartPointer().asString()); + } else { + logger.warn("ignoring null batch start pointer"); } final String currentForceMergeUUID = forceMergeUUID; if (currentForceMergeUUID != null) { diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java index 5d0988c3ca0eb..351a45b804dff 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -52,6 +52,7 @@ public class DefaultStreamPoller implements StreamPoller { // start of the batch, inclusive private IngestionShardPointer batchStartPointer; + private boolean includeBatchStartPointer = false; private ResetState resetState; private final String resetValue; @@ -130,6 +131,8 @@ public void start() { throw new RuntimeException("poller is closed!"); } started = true; + // when we start, we need to include the batch start pointer in the read for the first read + includeBatchStartPointer = true; consumerThread.submit(this::startPoll); processorThread.submit(processorRunnable); } @@ -146,9 +149,6 @@ protected void startPoll() { } logger.info("Starting poller for shard {}", consumer.getShardId()); - // track the last record successfully written to the blocking queue - IngestionShardPointer lastSuccessfulPointer = null; - while (true) { try { if (closed) { @@ -196,11 +196,13 @@ protected void startPoll() { state = State.POLLING; - List> results = consumer.readNext( - batchStartPointer, - MAX_POLL_SIZE, - POLL_TIMEOUT - ); + List> results; + + if (includeBatchStartPointer) { + results = consumer.readNext(batchStartPointer, true, MAX_POLL_SIZE, POLL_TIMEOUT); + } else { + results = consumer.readNext(MAX_POLL_SIZE, POLL_TIMEOUT); + } if (results.isEmpty()) { // no new records @@ -209,7 +211,14 @@ protected void startPoll() { state = State.PROCESSING; // process the records + boolean firstInBatch = true; for (IngestionShardConsumer.ReadResult result : results) { + if (firstInBatch) { + // track the start pointer of the current batch being processed + batchStartPointer = result.getPointer(); + firstInBatch = false; + } + // check if the message is already processed + if (isProcessed(result.getPointer())) { + logger.info("Skipping message with pointer {} as it is already processed", result.getPointer().asString()); @@ -217,25 +226,20 @@ protected void startPoll() { } totalPolledCount.inc(); blockingQueue.put(result); - lastSuccessfulPointer = 
result.getPointer(); + logger.debug( "Put message {} with pointer {} to the blocking queue", String.valueOf(result.getMessage().getPayload()), result.getPointer().asString() ); } - // update the batch start pointer to the next batch - batchStartPointer = consumer.nextPointer(); + // for future reads we do not need to include the batch start pointer; subsequent reads continue from the last successful pointer. + includeBatchStartPointer = false; } catch (Throwable e) { logger.error("Error in polling the shard {}: {}", consumer.getShardId(), e); errorStrategy.handleError(e, IngestionErrorStrategy.ErrorStage.POLLING); - if (errorStrategy.shouldIgnoreError(e, IngestionErrorStrategy.ErrorStage.POLLING)) { - // Advance the batch start pointer to ignore the error and continue from next record - batchStartPointer = lastSuccessfulPointer == null - ? consumer.nextPointer(batchStartPointer) - : consumer.nextPointer(lastSuccessfulPointer); - } else { + if (!errorStrategy.shouldIgnoreError(e, IngestionErrorStrategy.ErrorStage.POLLING)) { // Blocking error encountered. Pause poller to stop processing remaining updates. pause(); } diff --git a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java index 6233a65664d0b..c9225dfd32bc0 100644 --- a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java +++ b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java @@ -65,13 +65,20 @@ public FakeIngestionConsumer(List messages, int shardId) { @Override public List<ReadResult<FakeIngestionShardPointer, FakeIngestionMessage>> readNext( FakeIngestionShardPointer pointer, + boolean includeStart, long maxMessages, int timeoutMillis ) throws TimeoutException { - lastFetchedOffset = pointer.offset - 1; + if (includeStart) { + lastFetchedOffset = pointer.offset - 1; + } else { + lastFetchedOffset = pointer.offset; + } + int numToFetch = Math.min(messages.size() - (int) pointer.offset, (int) maxMessages); List<ReadResult<FakeIngestionShardPointer, FakeIngestionMessage>> result = new ArrayList<>(); - for (long i = pointer.offset; i < pointer.offset + numToFetch; i++) { + long startOffset = includeStart ? 
pointer.offset : pointer.offset + 1; + for (long i = startOffset; i < pointer.offset + numToFetch; i++) { result.add(new ReadResult<>(new FakeIngestionShardPointer(i), new FakeIngestionMessage(messages.get((int) i)))); lastFetchedOffset = i; } @@ -79,13 +86,9 @@ public List> readNex } @Override - public FakeIngestionShardPointer nextPointer() { - return new FakeIngestionShardPointer(lastFetchedOffset + 1); - } - - @Override - public FakeIngestionShardPointer nextPointer(FakeIngestionShardPointer startPointer) { - return new FakeIngestionShardPointer(startPointer.offset + 1); + public List<ReadResult<FakeIngestionShardPointer, FakeIngestionMessage>> readNext(long maxMessages, int timeoutMillis) + throws TimeoutException { + return readNext(new FakeIngestionShardPointer(lastFetchedOffset), false, maxMessages, timeoutMillis); } @Override diff --git a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java index 2d00bbcba0c8c..8f84f59cfbccc 100644 --- a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java @@ -99,7 +99,8 @@ public void testCreateEngine() throws IOException { Map commitData = ingestionEngine.commitDataAsMap(); // verify the commit data Assert.assertEquals(7, commitData.size()); - Assert.assertEquals("2", commitData.get(StreamPoller.BATCH_START)); + // the commit data is the start of the current batch + Assert.assertEquals("0", commitData.get(StreamPoller.BATCH_START)); // verify the stored offsets var offset = new FakeIngestionSource.FakeIngestionShardPointer(0); diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java index 11d130aef469b..4599e3e8f154c 100644 --- a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java +++ b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java @@ -27,6 +27,7 @@ import java.util.concurrent.TimeoutException; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; @@ -246,16 +247,22 @@ public void testDropErrorIngestionStrategy() throws TimeoutException, Interrupte FakeIngestionSource.FakeIngestionShardPointer, FakeIngestionSource.FakeIngestionMessage>> readResultsBatch1 = fakeConsumer.readNext( fakeConsumer.earliestPointer(), + true, 2, 100 ); List< IngestionShardConsumer.ReadResult< FakeIngestionSource.FakeIngestionShardPointer, FakeIngestionSource.FakeIngestionMessage>> readResultsBatch2 = fakeConsumer.readNext( + new FakeIngestionSource.FakeIngestionShardPointer(1), + true, + 2, + 100 + ); IngestionShardConsumer mockConsumer = mock(IngestionShardConsumer.class); when(mockConsumer.getShardId()).thenReturn(0); - when(mockConsumer.readNext(any(), anyLong(), anyInt())).thenThrow(new RuntimeException("message1 poll failed")) + when(mockConsumer.readNext(any(), anyBoolean(), anyLong(), anyInt())).thenThrow(new RuntimeException("message1 poll failed")) .thenReturn(readResultsBatch1) .thenThrow(new RuntimeException("message3 poll failed")) .thenReturn(readResultsBatch2) @@ -274,8 +281,8 @@ public 
void testDropErrorIngestionStrategy() throws TimeoutException, Interrupte poller.start(); Thread.sleep(sleepTime); - verify(errorStrategy, times(2)).handleError(any(), eq(IngestionErrorStrategy.ErrorStage.POLLING)); - verify(processor, times(4)).process(any(), any()); + verify(errorStrategy, times(1)).handleError(any(), eq(IngestionErrorStrategy.ErrorStage.POLLING)); + verify(processor, times(2)).process(any(), any()); } public void testBlockErrorIngestionStrategy() throws TimeoutException, InterruptedException { @@ -286,16 +293,22 @@ public void testBlockErrorIngestionStrategy() throws TimeoutException, Interrupt FakeIngestionSource.FakeIngestionShardPointer, FakeIngestionSource.FakeIngestionMessage>> readResultsBatch1 = fakeConsumer.readNext( fakeConsumer.earliestPointer(), + true, 2, 100 ); List< IngestionShardConsumer.ReadResult< FakeIngestionSource.FakeIngestionShardPointer, - FakeIngestionSource.FakeIngestionMessage>> readResultsBatch2 = fakeConsumer.readNext(fakeConsumer.nextPointer(), 2, 100); + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch2 = fakeConsumer.readNext( + new FakeIngestionSource.FakeIngestionShardPointer(1), + true, + 2, + 100 + ); IngestionShardConsumer mockConsumer = mock(IngestionShardConsumer.class); when(mockConsumer.getShardId()).thenReturn(0); - when(mockConsumer.readNext(any(), anyLong(), anyInt())).thenThrow(new RuntimeException("message1 poll failed")) + when(mockConsumer.readNext(any(), anyBoolean(), anyLong(), anyInt())).thenThrow(new RuntimeException("message1 poll failed")) .thenReturn(readResultsBatch1) .thenReturn(readResultsBatch2) .thenReturn(Collections.emptyList()); From b52e63f3ae62c4c49aaabd1d133f5f4a172783ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Mar 2025 15:08:06 -0400 Subject: [PATCH 106/550] Bump com.nimbusds:nimbus-jose-jwt from 10.0.1 to 10.0.2 in /test/fixtures/hdfs-fixture (#17669) * Bump com.nimbusds:nimbus-jose-jwt in /test/fixtures/hdfs-fixture Bumps [com.nimbusds:nimbus-jose-jwt](https://bitbucket.org/connect2id/nimbus-jose-jwt) from 10.0.1 to 10.0.2. - [Changelog](https://bitbucket.org/connect2id/nimbus-jose-jwt/src/master/CHANGELOG.txt) - [Commits](https://bitbucket.org/connect2id/nimbus-jose-jwt/branches/compare/10.0.2..10.0.1) --- updated-dependencies: - dependency-name: com.nimbusds:nimbus-jose-jwt dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 2 +- test/fixtures/hdfs-fixture/build.gradle | 2 +- test/framework/licenses/bcutil-jdk18on-1.78.jar.sha1 | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) delete mode 100644 test/framework/licenses/bcutil-jdk18on-1.78.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index e62a6235a3609..0c90fc26867b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) ### Dependencies -- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607)) +- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607), [#17669](https://github.com/opensearch-project/OpenSearch/pull/17669)) - Bump `com.google.api:api-common` from 1.8.1 to 2.46.1 ([#17604](https://github.com/opensearch-project/OpenSearch/pull/17604)) - Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) - Bump `org.jruby.joni:joni` from 2.2.3 to 2.2.5 ([#17608](https://github.com/opensearch-project/OpenSearch/pull/17608)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 52291d360bd14..faf20b2070cf6 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -79,7 +79,7 @@ dependencies { api "org.jboss.xnio:xnio-nio:3.8.16.Final" api 'org.jline:jline:3.29.0' api 'org.apache.commons:commons-configuration2:2.11.0' - api 'com.nimbusds:nimbus-jose-jwt:10.0.1' + api 'com.nimbusds:nimbus-jose-jwt:10.0.2' api ('org.apache.kerby:kerb-admin:2.1.0') { exclude group: "org.jboss.xnio" exclude group: "org.jline" diff --git a/test/framework/licenses/bcutil-jdk18on-1.78.jar.sha1 b/test/framework/licenses/bcutil-jdk18on-1.78.jar.sha1 deleted file mode 100644 index 9c88eef3ace17..0000000000000 --- a/test/framework/licenses/bcutil-jdk18on-1.78.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -81c1f5e06f206be5dad137d563609dbe66c81d31 \ No newline at end of file From 29060c65d6a927d37c32dc74d752f0943de28fbd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Mar 2025 18:15:46 -0400 Subject: [PATCH 107/550] Bump com.azure:azure-storage-blob from 12.29.1 to 12.30.0 in /plugins/repository-azure (#17667) * Bump com.azure:azure-storage-blob in /plugins/repository-azure Bumps [com.azure:azure-storage-blob](https://github.com/Azure/azure-sdk-for-java) from 12.29.1 to 12.30.0. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-storage-blob_12.29.1...azure-storage-blob_12.30.0) --- updated-dependencies: - dependency-name: com.azure:azure-storage-blob dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-storage-blob-12.29.1.jar.sha1 | 1 - .../licenses/azure-storage-blob-12.30.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.29.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.30.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c90fc26867b0..5f6290d7ee894 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.jruby.joni:joni` from 2.2.3 to 2.2.5 ([#17608](https://github.com/opensearch-project/OpenSearch/pull/17608)) - Bump `dangoslen/dependabot-changelog-helper` from 3 to 4 ([#17498](https://github.com/opensearch-project/OpenSearch/pull/17498)) - Bump `com.google.api:gax` from 2.35.0 to 2.63.1 ([#17465](https://github.com/opensearch-project/OpenSearch/pull/17465)) +- Bump `com.azure:azure-storage-blob` from 12.29.1 to 12.30.0 ([#17667](https://github.com/opensearch-project/OpenSearch/pull/17667)) ### Changed diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index c2fc2233c0473..8c61acd978f09 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -56,7 +56,7 @@ dependencies { api "io.netty:netty-resolver-dns:${versions.netty}" api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') - api 'com.azure:azure-storage-blob:12.29.1' + api 'com.azure:azure-storage-blob:12.30.0' api 'com.azure:azure-identity:1.14.2' // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.29.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.29.1.jar.sha1 deleted file mode 100644 index af0da6064372e..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-blob-12.29.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf6845feeee7e47da636afcfa28f3affbf1fede5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.30.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.30.0.jar.sha1 new file mode 100644 index 0000000000000..2f6fc7a879e5f --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-blob-12.30.0.jar.sha1 @@ -0,0 +1 @@ +a187bbdf04d9d4c0144ef619ba02ce1cd07211ac \ No newline at end of file From 3ac7c2315b080b93ac852af2d5d7bf7f2abc4aec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 12:33:12 -0400 Subject: [PATCH 108/550] Bump tj-actions/changed-files from 46.0.1 to 46.0.3 (#17666) * Bump tj-actions/changed-files from 46.0.1 to 46.0.3 Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 46.0.1 to 46.0.3. 
- [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v46.0.1...v46.0.3) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Craig Perkins Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Craig Perkins --- .github/workflows/gradle-check.yml | 2 +- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 4b79bd8310c76..e3b5739237fdc 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -24,7 +24,7 @@ jobs: - uses: actions/checkout@v4 - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v46.0.1 + uses: tj-actions/changed-files@v46.0.3 with: files_ignore: | release-notes/*.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f6290d7ee894..59f8f9347402d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `dangoslen/dependabot-changelog-helper` from 3 to 4 ([#17498](https://github.com/opensearch-project/OpenSearch/pull/17498)) - Bump `com.google.api:gax` from 2.35.0 to 2.63.1 ([#17465](https://github.com/opensearch-project/OpenSearch/pull/17465)) - Bump `com.azure:azure-storage-blob` from 12.29.1 to 12.30.0 ([#17667](https://github.com/opensearch-project/OpenSearch/pull/17667)) +- Bump `tj-actions/changed-files` from 46.0.1 to 46.0.3 ([#17666](https://github.com/opensearch-project/OpenSearch/pull/17666)) ### Changed From 17289b7685e72425e13051b1169518cdcafc4600 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 25 Mar 2025 14:32:16 -0400 Subject: [PATCH 109/550] Add support of Java policies (#17663) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + gradle/missing-javadoc.gradle | 1 + libs/agent-sm/agent-policy/build.gradle | 26 + .../java/org/opensearch/package-info.java | 12 + .../secure_sm/policy/ParseUtil.java | 616 +++++++ .../opensearch/secure_sm/policy/Password.java | 173 ++ .../secure_sm/policy/PolicyFile.java | 1601 +++++++++++++++++ .../secure_sm/policy/PolicyParser.java | 1163 ++++++++++++ .../secure_sm/policy/PolicyUtil.java | 170 ++ .../secure_sm/policy/PropertyExpander.java | 133 ++ .../secure_sm/policy/SecurityConstants.java | 145 ++ .../secure_sm/policy/package-info.java | 12 + libs/agent-sm/bootstrap/build.gradle | 24 + .../javaagent/bootstrap/AgentPolicy.java | 89 + .../javaagent/bootstrap/package-info.java | 12 + .../java/org/opensearch/package-info.java | 12 + libs/agent-sm/build.gradle | 22 + 17 files changed, 4212 insertions(+) create mode 100644 libs/agent-sm/agent-policy/build.gradle create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/package-info.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/ParseUtil.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Password.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java 
create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyUtil.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/SecurityConstants.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java create mode 100644 libs/agent-sm/bootstrap/build.gradle create mode 100644 libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java create mode 100644 libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java create mode 100644 libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java create mode 100644 libs/agent-sm/build.gradle diff --git a/CHANGELOG.md b/CHANGELOG.md index 59f8f9347402d..b9e2426816006 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a new search node role to hold search only shards ([#17620](https://github.com/opensearch-project/OpenSearch/pull/17620)) - Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) +- [Security Manager Replacement] Add support of Java policies ([#17663](https://github.com/opensearch-project/OpenSearch/pull/17663)) - Added Kinesis support as a plugin for the pull-based ingestion ([#17615](https://github.com/opensearch-project/OpenSearch/pull/17615) ### Changed diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 6e31f838e678a..9f27dc5cadcd2 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -106,6 +106,7 @@ configure([ project(":libs:opensearch-secure-sm"), project(":libs:opensearch-ssl-config"), project(":libs:opensearch-x-content"), + project(":libs:agent-sm:agent-policy"), project(":modules:aggs-matrix-stats"), project(":modules:analysis-common"), project(":modules:geo"), diff --git a/libs/agent-sm/agent-policy/build.gradle b/libs/agent-sm/agent-policy/build.gradle new file mode 100644 index 0000000000000..997ed5ddf174b --- /dev/null +++ b/libs/agent-sm/agent-policy/build.gradle @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +apply plugin: 'opensearch.build' +apply plugin: 'opensearch.publish' + +ext { + failOnJavadocWarning = false +} + +base { + archivesName = 'opensearch-agent-policy' +} + +disableTasks('forbiddenApisMain') + +test.enabled = false +testingConventions.enabled = false diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/package-info.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/package-info.java new file mode 100644 index 0000000000000..0724b60d1777f --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Java Agent Policy + */ +package org.opensearch; diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/ParseUtil.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/ParseUtil.java new file mode 100644 index 0000000000000..d4477fa13fdcd --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/ParseUtil.java @@ -0,0 +1,616 @@ +/* + * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.secure_sm.policy; + +import java.io.File; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.CharacterCodingException; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.CoderResult; +import java.nio.charset.CodingErrorAction; +import java.nio.charset.StandardCharsets; +import java.util.HexFormat; + +/** + * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/net/www/ParseUtil.java + */ +public final class ParseUtil { + + private static final HexFormat HEX_UPPERCASE = HexFormat.of().withUpperCase(); + + private ParseUtil() {} + + /** + * Constructs an encoded version of the specified path string suitable + * for use in the construction of a URL. + * + * A path separator is replaced by a forward slash. The string is UTF8 + * encoded. The % escape sequence is used for characters that are above + * 0x7F or those defined in RFC2396 as reserved or excluded in the path + * component of a URL. + */ + public static String encodePath(String path) { + return encodePath(path, true); + } + + /* + * flag indicates whether path uses platform dependent + * File.separatorChar or not. True indicates path uses platform + * dependent File.separatorChar. + */ + public static String encodePath(String path, boolean flag) { + if (flag && File.separatorChar != '/') { + return encodePath(path, 0, File.separatorChar); + } else { + int index = firstEncodeIndex(path); + if (index > -1) { + return encodePath(path, index, '/'); + } else { + return path; + } + } + } + + private static int firstEncodeIndex(String path) { + int len = path.length(); + for (int i = 0; i < len; i++) { + char c = path.charAt(i); + // Ordering in the following test is performance sensitive, + // and typically paths have most chars in the a-z range, then + // in the symbol range '&'-':' (includes '.', '/' and '0'-'9') + // and more rarely in the A-Z range. + if (c >= 'a' && c <= 'z' || c >= '&' && c <= ':' || c >= 'A' && c <= 'Z') { + continue; + } else if (c > 0x007F || match(c, L_ENCODED, H_ENCODED)) { + return i; + } + } + return -1; + } + + private static String encodePath(String path, int index, char sep) { + char[] pathCC = path.toCharArray(); + char[] retCC = new char[pathCC.length * 2 + 16 - index]; + if (index > 0) { + System.arraycopy(pathCC, 0, retCC, 0, index); + } + int retLen = index; + + for (int i = index; i < pathCC.length; i++) { + char c = pathCC[i]; + if (c == sep) retCC[retLen++] = '/'; + else { + if (c <= 0x007F) { + if (c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9') { + retCC[retLen++] = c; + } else if (match(c, L_ENCODED, H_ENCODED)) { + retLen = escape(retCC, c, retLen); + } else { + retCC[retLen++] = c; + } + } else if (c > 0x07FF) { + retLen = escape(retCC, (char) (0xE0 | ((c >> 12) & 0x0F)), retLen); + retLen = escape(retCC, (char) (0x80 | ((c >> 6) & 0x3F)), retLen); + retLen = escape(retCC, (char) (0x80 | ((c >> 0) & 0x3F)), retLen); + } else { + retLen = escape(retCC, (char) (0xC0 | ((c >> 6) & 0x1F)), retLen); + retLen = escape(retCC, (char) (0x80 | ((c >> 0) & 0x3F)), retLen); + } + } + // worst case scenario for character [0x7ff-] every single + // character will be encoded into 9 characters. 
+ if (retLen + 9 > retCC.length) { + int newLen = retCC.length * 2 + 16; + if (newLen < 0) { + newLen = Integer.MAX_VALUE; + } + char[] buf = new char[newLen]; + System.arraycopy(retCC, 0, buf, 0, retLen); + retCC = buf; + } + } + return new String(retCC, 0, retLen); + } + + /** + * Appends the URL escape sequence for the specified char to the + * specified character array. + */ + private static int escape(char[] cc, char c, int index) { + cc[index++] = '%'; + cc[index++] = Character.forDigit((c >> 4) & 0xF, 16); + cc[index++] = Character.forDigit(c & 0xF, 16); + return index; + } + + /** + * Un-escape and return the character at position i in string s. + */ + private static byte unescape(String s, int i) { + return (byte) Integer.parseInt(s, i + 1, i + 3, 16); + } + + /** + * Returns a new String constructed from the specified String by replacing + * the URL escape sequences and UTF8 encoding with the characters they + * represent. + */ + public static String decode(String s) { + int n = s.length(); + if ((n == 0) || (s.indexOf('%') < 0)) return s; + + StringBuilder sb = new StringBuilder(n); + ByteBuffer bb = ByteBuffer.allocate(n); + CharBuffer cb = CharBuffer.allocate(n); + CharsetDecoder dec = StandardCharsets.UTF_8.newDecoder() + .onMalformedInput(CodingErrorAction.REPORT) + .onUnmappableCharacter(CodingErrorAction.REPORT); + + char c = s.charAt(0); + for (int i = 0; i < n;) { + assert c == s.charAt(i); + if (c != '%') { + sb.append(c); + if (++i >= n) break; + c = s.charAt(i); + continue; + } + bb.clear(); + for (;;) { + if (n - i < 2) { + throw new IllegalArgumentException("Malformed escape pair: " + s); + } + + try { + bb.put(unescape(s, i)); + } catch (NumberFormatException | IndexOutOfBoundsException e) { + throw new IllegalArgumentException("Malformed escape pair: " + s); + } + i += 3; + if (i >= n) break; + c = s.charAt(i); + if (c != '%') break; + } + bb.flip(); + cb.clear(); + dec.reset(); + CoderResult cr = dec.decode(bb, cb, true); + if (cr.isError()) throw new IllegalArgumentException("Error decoding percent encoded characters"); + cr = dec.flush(cb); + if (cr.isError()) throw new IllegalArgumentException("Error decoding percent encoded characters"); + sb.append(cb.flip().toString()); + } + + return sb.toString(); + } + + public static URL fileToEncodedURL(File file) throws MalformedURLException { + String path = file.getAbsolutePath(); + path = ParseUtil.encodePath(path); + if (!path.startsWith("/")) { + path = "/" + path; + } + if (!path.endsWith("/") && file.isDirectory()) { + path = path + "/"; + } + @SuppressWarnings("deprecation") + var result = new URL("file", "", path); + return result; + } + + public static java.net.URI toURI(URL url) { + String protocol = url.getProtocol(); + String auth = url.getAuthority(); + String path = url.getPath(); + String query = url.getQuery(); + String ref = url.getRef(); + if (path != null && !(path.startsWith("/"))) path = "/" + path; + + // + // In java.net.URI class, a port number of -1 implies the default + // port number. So get it stripped off before creating URI instance. + // + if (auth != null && auth.endsWith(":-1")) auth = auth.substring(0, auth.length() - 3); + + java.net.URI uri; + try { + uri = createURI(protocol, auth, path, query, ref); + } catch (java.net.URISyntaxException e) { + uri = null; + } + return uri; + } + + // + // createURI() and its auxiliary code are cloned from java.net.URI. + // Most of the code are just copy and paste, except that quote() + // has been modified to avoid double-escape. 
+ // + // Usually it is unacceptable, but we're forced to do it because + // otherwise we need to change public API, namely java.net.URI's + // multi-argument constructors. It turns out that the changes cause + // incompatibilities so can't be done. + // + private static URI createURI(String scheme, String authority, String path, String query, String fragment) throws URISyntaxException { + String s = toString(scheme, null, authority, null, null, -1, path, query, fragment); + checkPath(s, scheme, path); + return new URI(s); + } + + private static String toString( + String scheme, + String opaquePart, + String authority, + String userInfo, + String host, + int port, + String path, + String query, + String fragment + ) { + StringBuilder sb = new StringBuilder(); + if (scheme != null) { + sb.append(scheme); + sb.append(':'); + } + appendSchemeSpecificPart(sb, opaquePart, authority, userInfo, host, port, path, query); + appendFragment(sb, fragment); + return sb.toString(); + } + + private static void appendSchemeSpecificPart( + StringBuilder sb, + String opaquePart, + String authority, + String userInfo, + String host, + int port, + String path, + String query + ) { + if (opaquePart != null) { + /* check if SSP begins with an IPv6 address + * because we must not quote a literal IPv6 address + */ + if (opaquePart.startsWith("//[")) { + int end = opaquePart.indexOf(']'); + if (end != -1 && opaquePart.indexOf(':') != -1) { + String doquote, dontquote; + if (end == opaquePart.length()) { + dontquote = opaquePart; + doquote = ""; + } else { + dontquote = opaquePart.substring(0, end + 1); + doquote = opaquePart.substring(end + 1); + } + sb.append(dontquote); + sb.append(quote(doquote, L_URIC, H_URIC)); + } + } else { + sb.append(quote(opaquePart, L_URIC, H_URIC)); + } + } else { + appendAuthority(sb, authority, userInfo, host, port); + if (path != null) sb.append(quote(path, L_PATH, H_PATH)); + if (query != null) { + sb.append('?'); + sb.append(quote(query, L_URIC, H_URIC)); + } + } + } + + private static void appendAuthority(StringBuilder sb, String authority, String userInfo, String host, int port) { + if (host != null) { + sb.append("//"); + if (userInfo != null) { + sb.append(quote(userInfo, L_USERINFO, H_USERINFO)); + sb.append('@'); + } + boolean needBrackets = ((host.indexOf(':') >= 0) && !host.startsWith("[") && !host.endsWith("]")); + if (needBrackets) sb.append('['); + sb.append(host); + if (needBrackets) sb.append(']'); + if (port != -1) { + sb.append(':'); + sb.append(port); + } + } else if (authority != null) { + sb.append("//"); + if (authority.startsWith("[")) { + int end = authority.indexOf(']'); + if (end != -1 && authority.indexOf(':') != -1) { + String doquote, dontquote; + if (end == authority.length()) { + dontquote = authority; + doquote = ""; + } else { + dontquote = authority.substring(0, end + 1); + doquote = authority.substring(end + 1); + } + sb.append(dontquote); + sb.append(quote(doquote, L_REG_NAME | L_SERVER, H_REG_NAME | H_SERVER)); + } + } else { + sb.append(quote(authority, L_REG_NAME | L_SERVER, H_REG_NAME | H_SERVER)); + } + } + } + + private static void appendFragment(StringBuilder sb, String fragment) { + if (fragment != null) { + sb.append('#'); + sb.append(quote(fragment, L_URIC, H_URIC)); + } + } + + // Quote any characters in s that are not permitted + // by the given mask pair + // + private static String quote(String s, long lowMask, long highMask) { + int n = s.length(); + StringBuilder sb = null; + CharsetEncoder encoder = null; + boolean allowNonASCII = 
((lowMask & L_ESCAPED) != 0); + for (int i = 0; i < s.length(); i++) { + char c = s.charAt(i); + if (c < '\u0080') { + if (!match(c, lowMask, highMask) && !isEscaped(s, i)) { + if (sb == null) { + sb = new StringBuilder(); + sb.append(s, 0, i); + } + appendEscape(sb, (byte) c); + } else { + if (sb != null) sb.append(c); + } + } else if (allowNonASCII && (Character.isSpaceChar(c) || Character.isISOControl(c))) { + if (encoder == null) { + encoder = StandardCharsets.UTF_8.newEncoder(); + } + if (sb == null) { + sb = new StringBuilder(); + sb.append(s, 0, i); + } + appendEncoded(encoder, sb, c); + } else { + if (sb != null) sb.append(c); + } + } + return (sb == null) ? s : sb.toString(); + } + + // + // To check if the given string has an escaped triplet + // at the given position + // + private static boolean isEscaped(String s, int pos) { + if (s == null || (s.length() <= (pos + 2))) return false; + + return s.charAt(pos) == '%' && match(s.charAt(pos + 1), L_HEX, H_HEX) && match(s.charAt(pos + 2), L_HEX, H_HEX); + } + + private static void appendEncoded(CharsetEncoder encoder, StringBuilder sb, char c) { + ByteBuffer bb = null; + try { + bb = encoder.encode(CharBuffer.wrap("" + c)); + } catch (CharacterCodingException x) { + assert false; + } + while (bb.hasRemaining()) { + int b = bb.get() & 0xff; + if (b >= 0x80) appendEscape(sb, (byte) b); + else sb.append((char) b); + } + } + + private static void appendEscape(StringBuilder sb, byte b) { + sb.append('%'); + HEX_UPPERCASE.toHexDigits(sb, b); + } + + // Tell whether the given character is permitted by the given mask pair + private static boolean match(char c, long lowMask, long highMask) { + if (c < 64) return ((1L << c) & lowMask) != 0; + if (c < 128) return ((1L << (c - 64)) & highMask) != 0; + return false; + } + + // If a scheme is given then the path, if given, must be absolute + // + private static void checkPath(String s, String scheme, String path) throws URISyntaxException { + if (scheme != null) { + if (path != null && !path.isEmpty() && path.charAt(0) != '/') throw new URISyntaxException(s, "Relative path in absolute URI"); + } + } + + // -- Character classes for parsing -- + + // To save startup time, we manually calculate the low-/highMask constants. 
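+    // Bit i of a low mask marks character i (code points 0-63) as permitted,
+    // and bit i of a high mask marks character 64 + i (code points 64-127);
+    // match(char, long, long) above tests exactly these bits. For example,
+    // L_DIGIT below is lowMask('0', '9'): ten consecutive bits starting at
+    // bit 48 (since '0' == 48), which gives 0x3FF000000000000L.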
+ // For reference, the following methods were used to calculate the values: + + // Compute a low-order mask for the characters + // between first and last, inclusive + // private static long lowMask(char first, char last) { + // long m = 0; + // int f = Math.max(Math.min(first, 63), 0); + // int l = Math.max(Math.min(last, 63), 0); + // for (int i = f; i <= l; i++) + // m |= 1L << i; + // return m; + // } + + // Compute the low-order mask for the characters in the given string + // private static long lowMask(String chars) { + // int n = chars.length(); + // long m = 0; + // for (int i = 0; i < n; i++) { + // char c = chars.charAt(i); + // if (c < 64) + // m |= (1L << c); + // } + // return m; + // } + + // Compute a high-order mask for the characters + // between first and last, inclusive + // private static long highMask(char first, char last) { + // long m = 0; + // int f = Math.max(Math.min(first, 127), 64) - 64; + // int l = Math.max(Math.min(last, 127), 64) - 64; + // for (int i = f; i <= l; i++) + // m |= 1L << i; + // return m; + // } + + // Compute the high-order mask for the characters in the given string + // private static long highMask(String chars) { + // int n = chars.length(); + // long m = 0; + // for (int i = 0; i < n; i++) { + // char c = chars.charAt(i); + // if ((c >= 64) && (c < 128)) + // m |= (1L << (c - 64)); + // } + // return m; + // } + + // Character-class masks + + // digit = "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | + // "8" | "9" + private static final long L_DIGIT = 0x3FF000000000000L; // lowMask('0', '9'); + private static final long H_DIGIT = 0L; + + // hex = digit | "A" | "B" | "C" | "D" | "E" | "F" | + // "a" | "b" | "c" | "d" | "e" | "f" + private static final long L_HEX = L_DIGIT; + private static final long H_HEX = 0x7E0000007EL; // highMask('A', 'F') | highMask('a', 'f'); + + // upalpha = "A" | "B" | "C" | "D" | "E" | "F" | "G" | "H" | "I" | + // "J" | "K" | "L" | "M" | "N" | "O" | "P" | "Q" | "R" | + // "S" | "T" | "U" | "V" | "W" | "X" | "Y" | "Z" + private static final long L_UPALPHA = 0L; + private static final long H_UPALPHA = 0x7FFFFFEL; // highMask('A', 'Z'); + + // lowalpha = "a" | "b" | "c" | "d" | "e" | "f" | "g" | "h" | "i" | + // "j" | "k" | "l" | "m" | "n" | "o" | "p" | "q" | "r" | + // "s" | "t" | "u" | "v" | "w" | "x" | "y" | "z" + private static final long L_LOWALPHA = 0L; + private static final long H_LOWALPHA = 0x7FFFFFE00000000L; // highMask('a', 'z'); + + // alpha = lowalpha | upalpha + private static final long L_ALPHA = L_LOWALPHA | L_UPALPHA; + private static final long H_ALPHA = H_LOWALPHA | H_UPALPHA; + + // alphanum = alpha | digit + private static final long L_ALPHANUM = L_DIGIT | L_ALPHA; + private static final long H_ALPHANUM = H_DIGIT | H_ALPHA; + + // mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | + // "(" | ")" + private static final long L_MARK = 0x678200000000L; // lowMask("-_.!~*'()"); + private static final long H_MARK = 0x4000000080000000L; // highMask("-_.!~*'()"); + + // unreserved = alphanum | mark + private static final long L_UNRESERVED = L_ALPHANUM | L_MARK; + private static final long H_UNRESERVED = H_ALPHANUM | H_MARK; + + // reserved = ";" | "/" | "?" 
| ":" | "@" | "&" | "=" | "+" | + // "$" | "," | "[" | "]" + // Added per RFC2732: "[", "]" + private static final long L_RESERVED = 0xAC00985000000000L; // lowMask(";/?:@&=+$,[]"); + private static final long H_RESERVED = 0x28000001L; // highMask(";/?:@&=+$,[]"); + + // The zero'th bit is used to indicate that escape pairs and non-US-ASCII + // characters are allowed; this is handled by the scanEscape method below. + private static final long L_ESCAPED = 1L; + private static final long H_ESCAPED = 0L; + + // uric = reserved | unreserved | escaped + private static final long L_URIC = L_RESERVED | L_UNRESERVED | L_ESCAPED; + private static final long H_URIC = H_RESERVED | H_UNRESERVED | H_ESCAPED; + + // pchar = unreserved | escaped | + // ":" | "@" | "&" | "=" | "+" | "$" | "," + private static final long L_PCHAR = L_UNRESERVED | L_ESCAPED | 0x2400185000000000L; // lowMask(":@&=+$,"); + private static final long H_PCHAR = H_UNRESERVED | H_ESCAPED | 0x1L; // highMask(":@&=+$,"); + + // All valid path characters + private static final long L_PATH = L_PCHAR | 0x800800000000000L; // lowMask(";/"); + private static final long H_PATH = H_PCHAR; // highMask(";/") == 0x0L; + + // Dash, for use in domainlabel and toplabel + private static final long L_DASH = 0x200000000000L; // lowMask("-"); + private static final long H_DASH = 0x0L; // highMask("-"); + + // userinfo = *( unreserved | escaped | + // ";" | ":" | "&" | "=" | "+" | "$" | "," ) + private static final long L_USERINFO = L_UNRESERVED | L_ESCAPED | 0x2C00185000000000L; // lowMask(";:&=+$,"); + private static final long H_USERINFO = H_UNRESERVED | H_ESCAPED; // | highMask(";:&=+$,") == 0L; + + // reg_name = 1*( unreserved | escaped | "$" | "," | + // ";" | ":" | "@" | "&" | "=" | "+" ) + private static final long L_REG_NAME = L_UNRESERVED | L_ESCAPED | 0x2C00185000000000L; // lowMask("$,;:@&=+"); + private static final long H_REG_NAME = H_UNRESERVED | H_ESCAPED | 0x1L; // highMask("$,;:@&=+"); + + // All valid characters for server-based authorities + private static final long L_SERVER = L_USERINFO | L_ALPHANUM | L_DASH | 0x400400000000000L; // lowMask(".:@[]"); + private static final long H_SERVER = H_USERINFO | H_ALPHANUM | H_DASH | 0x28000001L; // highMask(".:@[]"); + + // Characters that are encoded in the path component of a URI. + // + // These characters are reserved in the path segment as described in + // RFC2396 section 3.3: + // "=" | ";" | "?" | "/" + // + // These characters are defined as excluded in RFC2396 section 2.4.3 + // and must be escaped if they occur in the data part of a URI: + // "#" | " " | "<" | ">" | "%" | "\"" | "{" | "}" | "|" | "\\" | "^" | + // "[" | "]" | "`" + // + // Also US ASCII control characters 00-1F and 7F. + + // lowMask((char)0, (char)31) | lowMask("=;?/# <>%\"{}|\\^[]`"); + private static final long L_ENCODED = 0xF800802DFFFFFFFFL; + + // highMask((char)0x7F, (char)0x7F) | highMask("=;?/# <>%\"{}|\\^[]`"); + private static final long H_ENCODED = 0xB800000178000000L; + +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Password.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Password.java new file mode 100644 index 0000000000000..ffe5f734fa0ea --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Password.java @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm.policy; + +import java.io.ByteArrayInputStream; +import java.io.Console; +import java.io.IOException; +import java.io.InputStream; +import java.io.PushbackInputStream; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.CodingErrorAction; +import java.util.Arrays; + +/** + * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/util/Password.java + */ +public class Password { + /** Reads user password from given input stream. */ + public static char[] readPassword(InputStream in) throws IOException { + return readPassword(in, false); + } + + /** Reads user password from given input stream. + * @param isEchoOn true if the password should be echoed on the screen + */ + @SuppressWarnings("fallthrough") + public static char[] readPassword(InputStream in, boolean isEchoOn) throws IOException { + + char[] consoleEntered = null; + byte[] consoleBytes = null; + + try { + // Use the new java.io.Console class + Console con = null; + if (!isEchoOn && in == System.in && ((con = System.console()) != null)) { + consoleEntered = con.readPassword(); + // readPassword returns "" if you just print ENTER, + // to be compatible with old Password class, change to null + if (consoleEntered != null && consoleEntered.length == 0) { + return null; + } + consoleBytes = convertToBytes(consoleEntered); + in = new ByteArrayInputStream(consoleBytes); + } + + // Rest of the lines still necessary for KeyStoreLoginModule + // and when there is no console. 
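+            // Fall back to reading the password from the stream one character
+            // at a time: end of stream or an end-of-line sequence ('\n' or
+            // "\r\n") terminates the input, and the 128-char line buffer is
+            // grown (and the old copy blanked) whenever it fills up.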
+ + char[] lineBuffer; + char[] buf; + int i; + + buf = lineBuffer = new char[128]; + + int room = buf.length; + int offset = 0; + int c; + + boolean done = false; + while (!done) { + switch (c = in.read()) { + case -1: + case '\n': + done = true; + break; + + case '\r': + int c2 = in.read(); + if ((c2 != '\n') && (c2 != -1)) { + if (!(in instanceof PushbackInputStream)) { + in = new PushbackInputStream(in); + } + ((PushbackInputStream) in).unread(c2); + } else { + done = true; + break; + } + /* fall through */ + default: + if (--room < 0) { + buf = new char[offset + 128]; + room = buf.length - offset - 1; + System.arraycopy(lineBuffer, 0, buf, 0, offset); + Arrays.fill(lineBuffer, ' '); + lineBuffer = buf; + } + buf[offset++] = (char) c; + break; + } + } + + if (offset == 0) { + return null; + } + + char[] ret = new char[offset]; + System.arraycopy(buf, 0, ret, 0, offset); + Arrays.fill(buf, ' '); + + return ret; + } finally { + if (consoleEntered != null) { + Arrays.fill(consoleEntered, ' '); + } + if (consoleBytes != null) { + Arrays.fill(consoleBytes, (byte) 0); + } + } + } + + /** + * Change a password read from Console.readPassword() into + * its original bytes. + * + * @param pass a char[] + * @return its byte[] format, similar to new String(pass).getBytes() + */ + private static byte[] convertToBytes(char[] pass) { + if (enc == null) { + synchronized (Password.class) { + enc = System.console() + .charset() + .newEncoder() + .onMalformedInput(CodingErrorAction.REPLACE) + .onUnmappableCharacter(CodingErrorAction.REPLACE); + } + } + byte[] ba = new byte[(int) (enc.maxBytesPerChar() * pass.length)]; + ByteBuffer bb = ByteBuffer.wrap(ba); + synchronized (enc) { + enc.reset().encode(CharBuffer.wrap(pass), bb, true); + } + if (bb.position() < ba.length) { + ba[bb.position()] = '\n'; + } + return ba; + } + + private static volatile CharsetEncoder enc; +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java new file mode 100644 index 0000000000000..14b1a8f56375c --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java @@ -0,0 +1,1601 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm.policy; + +import javax.security.auth.Subject; +import javax.security.auth.x500.X500Principal; + +import java.io.File; +import java.io.FilePermission; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.ObjectInputStream; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.net.MalformedURLException; +import java.net.NetPermission; +import java.net.SocketPermission; +import java.net.URI; +import java.net.URL; +import java.security.AllPermission; +import java.security.CodeSource; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.Permission; +import java.security.PermissionCollection; +import java.security.Permissions; +import java.security.Principal; +import java.security.ProtectionDomain; +import java.security.Security; +import java.security.SecurityPermission; +import java.security.UnresolvedPermission; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.PropertyPermission; +import java.util.Set; +import java.util.StringTokenizer; +import java.util.concurrent.ConcurrentHashMap; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/provider/PolicyFile.java + */ +@SuppressWarnings("removal") +public class PolicyFile extends java.security.Policy { + private static final String SELF = "${{self}}"; + private static final String X500PRINCIPAL = "javax.security.auth.x500.X500Principal"; + private static final String POLICY = "java.security.policy"; + private static final String POLICY_URL = "policy.url."; + + private static final int DEFAULT_CACHE_SIZE = 1; + + // contains the policy grant entries, PD cache, and alias mapping + // can be updated if refresh() is called + private volatile PolicyInfo policyInfo; + + private boolean expandProperties = true; + private boolean allowSystemProperties = true; + private boolean notUtf8 = false; + private URL url; + + // for use with the reflection API + private static final Class[] PARAMS0 = {}; + private static final Class[] PARAMS1 = { String.class }; + private static final Class[] PARAMS2 = { String.class, String.class }; + + /** + * When a policy file has a syntax error, the exception code may generate + * another permission check and this can cause the policy file to be parsed + * repeatedly, leading to a StackOverflowError or ClassCircularityError. + * To avoid this, this set is populated with policy files that have been + * previously parsed and have syntax errors, so that they can be + * subsequently ignored. + */ + private static Set badPolicyURLs = Collections.newSetFromMap(new ConcurrentHashMap()); + + /** + * Initializes the Policy object and reads the default policy + * configuration file(s) into the Policy object. 
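+     *
+     * The policy locations are taken from the {@code java.security.policy}
+     * system property (when set) and from the {@code policy.url.n} entries in
+     * the java.security properties file; if none of them can be read, a
+     * minimal built-in static policy is installed instead.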
+ */ + public PolicyFile() { + init((URL) null); + } + + /** + * Initializes the Policy object and reads the default policy + * from the specified URL only. + */ + public PolicyFile(URL url) { + this.url = url; + init(url); + } + + /** + * Initializes the Policy object and reads the default policy + * configuration file(s) into the Policy object. + * + * See the class description for details on the algorithm used to + * initialize the Policy object. + */ + private void init(URL url) { + int numCaches = DEFAULT_CACHE_SIZE; + PolicyInfo newInfo = new PolicyInfo(numCaches); + initPolicyFile(newInfo, url); + policyInfo = newInfo; + } + + private void initPolicyFile(final PolicyInfo newInfo, final URL url) { + if (url != null) { + + /** + * If the caller specified a URL via Policy.getInstance, + * we only read from default.policy and that URL. + */ + + if (init(url, newInfo) == false) { + // use static policy if all else fails + initStaticPolicy(newInfo); + } + + } else { + + /** + * Caller did not specify URL via Policy.getInstance. + * Read from URLs listed in the java.security properties file. + */ + + boolean loaded_one = initPolicyFile(POLICY, POLICY_URL, newInfo); + // To maintain strict backward compatibility + // we load the static policy only if POLICY load failed + if (!loaded_one) { + // use static policy if all else fails + initStaticPolicy(newInfo); + } + } + } + + private boolean initPolicyFile(final String propname, final String urlname, final PolicyInfo newInfo) { + boolean loaded_policy = false; + + if (allowSystemProperties) { + String extra_policy = System.getProperty(propname); + if (extra_policy != null) { + boolean overrideAll = false; + if (extra_policy.startsWith("=")) { + overrideAll = true; + extra_policy = extra_policy.substring(1); + } + try { + extra_policy = PropertyExpander.expand(extra_policy); + URL policyURL; + + File policyFile = new File(extra_policy); + if (policyFile.exists()) { + policyURL = ParseUtil.fileToEncodedURL(new File(policyFile.getCanonicalPath())); + } else { + policyURL = newURL(extra_policy); + } + if (init(policyURL, newInfo)) { + loaded_policy = true; + } + } catch (Exception e) {} + if (overrideAll) { + return Boolean.valueOf(loaded_policy); + } + } + } + + int n = 1; + String policy_uri; + + while ((policy_uri = Security.getProperty(urlname + n)) != null) { + try { + URL policy_url = null; + String expanded_uri = PropertyExpander.expand(policy_uri).replace(File.separatorChar, '/'); + + if (policy_uri.startsWith("file:${java.home}/") || policy_uri.startsWith("file:${user.home}/")) { + + // this special case accommodates + // the situation java.home/user.home + // expand to a single slash, resulting in + // a file://foo URI + policy_url = new File(expanded_uri.substring(5)).toURI().toURL(); + } else { + policy_url = new URI(expanded_uri).toURL(); + } + + if (init(policy_url, newInfo)) { + loaded_policy = true; + } + } catch (Exception e) { + // ignore that policy + } + n++; + } + return Boolean.valueOf(loaded_policy); + } + + /** + * Reads a policy configuration into the Policy object using a + * Reader object. 
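+     *
+     * URLs whose policy files previously failed to parse are remembered in
+     * {@code badPolicyURLs} and skipped on later calls to avoid re-parsing.
+     *
+     * @return true if the policy file was read and parsed successfully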
+ */ + private boolean init(URL policy, PolicyInfo newInfo) { + + // skip parsing policy file if it has been previously parsed and + // has syntax errors + if (badPolicyURLs.contains(policy)) { + return false; + } + + try (InputStreamReader isr = getInputStreamReader(PolicyUtil.getInputStream(policy))) { + + PolicyParser pp = new PolicyParser(expandProperties); + pp.read(isr); + + KeyStore keyStore = null; + try { + keyStore = PolicyUtil.getKeyStore( + policy, + pp.getKeyStoreUrl(), + pp.getKeyStoreType(), + pp.getKeyStoreProvider(), + pp.getStorePassURL() + ); + } catch (Exception e) { + // ignore, treat it like we have no keystore + } + + Enumeration enum_ = pp.grantElements(); + while (enum_.hasMoreElements()) { + PolicyParser.GrantEntry ge = enum_.nextElement(); + addGrantEntry(ge, keyStore, newInfo); + } + return true; + } catch (PolicyParser.ParsingException pe) { + // record bad policy file to avoid later reparsing it + badPolicyURLs.add(policy); + pe.printStackTrace(System.err); + } catch (Exception e) {} + + return false; + } + + private InputStreamReader getInputStreamReader(InputStream is) { + /* + * Read in policy using UTF-8 by default. + * + * Check non-standard system property to see if the default encoding + * should be used instead. + */ + return (notUtf8) ? new InputStreamReader(is) : new InputStreamReader(is, UTF_8); + } + + private void initStaticPolicy(final PolicyInfo newInfo) { + PolicyEntry pe = new PolicyEntry(new CodeSource(null, (Certificate[]) null)); + pe.add(SecurityConstants.LOCAL_LISTEN_PERMISSION); + pe.add(new PropertyPermission("java.version", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.vendor", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.vendor.url", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.class.version", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("os.name", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("os.version", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("os.arch", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("file.separator", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("path.separator", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("line.separator", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.specification.version", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.specification.maintenance.version", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.specification.vendor", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.specification.name", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.vm.specification.version", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.vm.specification.vendor", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.vm.specification.name", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.vm.version", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.vm.vendor", SecurityConstants.PROPERTY_READ_ACTION)); + pe.add(new PropertyPermission("java.vm.name", SecurityConstants.PROPERTY_READ_ACTION)); + + // No need to sync because no one has access to newInfo yet + 
newInfo.policyEntries.add(pe); + } + + /** + * Given a GrantEntry, create a codeSource. + * + * @return null if signedBy alias is not recognized + */ + private CodeSource getCodeSource(PolicyParser.GrantEntry ge, KeyStore keyStore, PolicyInfo newInfo) + throws java.net.MalformedURLException { + Certificate[] certs = null; + if (ge.signedBy != null) { + certs = getCertificates(keyStore, ge.signedBy, newInfo); + if (certs == null) { + return null; + } + } + + URL location; + + if (ge.codeBase != null) location = newURL(ge.codeBase); + else location = null; + + return (canonicalizeCodebase(new CodeSource(location, certs), false)); + } + + /** + * Add one policy entry to the list. + */ + private void addGrantEntry(PolicyParser.GrantEntry ge, KeyStore keyStore, PolicyInfo newInfo) { + + try { + CodeSource codesource = getCodeSource(ge, keyStore, newInfo); + // skip if signedBy alias was unknown... + if (codesource == null) return; + + // perform keystore alias principal replacement. + // for example, if alias resolves to X509 certificate, + // replace principal with: + // -- skip if alias is unknown + if (replacePrincipals(ge.principals, keyStore) == false) return; + PolicyEntry entry = new PolicyEntry(codesource, ge.principals); + Enumeration enum_ = ge.permissionElements(); + while (enum_.hasMoreElements()) { + PolicyParser.PermissionEntry pe = enum_.nextElement(); + + try { + // perform ${{ ... }} expansions within permission name + expandPermissionName(pe, keyStore); + + // XXX special case PrivateCredentialPermission-SELF + Permission perm; + if (pe.permission.equals("javax.security.auth.PrivateCredentialPermission") && pe.name.endsWith(" self")) { + pe.name = pe.name.substring(0, pe.name.indexOf("self")) + SELF; + } + // check for self + if (pe.name != null && pe.name.contains(SELF)) { + // Create a "SelfPermission" , it could be an + // an unresolved permission which will be resolved + // when implies is called + // Add it to entry + Certificate[] certs; + if (pe.signedBy != null) { + certs = getCertificates(keyStore, pe.signedBy, newInfo); + } else { + certs = null; + } + perm = new SelfPermission(pe.permission, pe.name, pe.action, certs); + } else { + perm = getInstance(pe.permission, pe.name, pe.action); + } + entry.add(perm); + } catch (ClassNotFoundException cnfe) { + Certificate[] certs; + if (pe.signedBy != null) { + certs = getCertificates(keyStore, pe.signedBy, newInfo); + } else { + certs = null; + } + + // only add if we had no signer or we had + // a signer and found the keys for it. + if (certs != null || pe.signedBy == null) { + Permission perm = new UnresolvedPermission(pe.permission, pe.name, pe.action, certs); + entry.add(perm); + } + } catch (java.lang.reflect.InvocationTargetException ite) { + ite.printStackTrace(System.err); + } catch (Exception e) { + e.printStackTrace(System.err); + } + } + + // No need to sync because no one has access to newInfo yet + newInfo.policyEntries.add(entry); + } catch (Exception e) { + e.printStackTrace(System.err); + } + } + + /** + * Returns a new Permission object of the given Type. The Permission is + * created by getting the + * Class object using the Class.forName method, and using + * the reflection API to invoke the (String name, String actions) + * constructor on the + * object. + * + * @param type the type of Permission being created. + * @param name the name of the Permission being created. + * @param actions the actions of the Permission being created. 
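+     * @return the newly constructed Permission object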
+ * + * @exception ClassNotFoundException if the particular Permission + * class could not be found. + * + * @exception IllegalAccessException if the class or initializer is + * not accessible. + * + * @exception InstantiationException if getInstance tries to + * instantiate an abstract class or an interface, or if the + * instantiation fails for some other reason. + * + * @exception NoSuchMethodException if the (String, String) constructor + * is not found. + * + * @exception InvocationTargetException if the underlying Permission + * constructor throws an exception. + * + */ + + private static final Permission getInstance(String type, String name, String actions) throws ClassNotFoundException, + InstantiationException, IllegalAccessException, NoSuchMethodException, InvocationTargetException { + Class pc = Class.forName(type, false, null); + Permission answer = getKnownPermission(pc, name, actions); + if (answer != null) { + return answer; + } + if (!Permission.class.isAssignableFrom(pc)) { + // not the right subtype + throw new ClassCastException(type + " is not a Permission"); + } + + if (name == null && actions == null) { + try { + Constructor c = pc.getConstructor(PARAMS0); + return (Permission) c.newInstance(new Object[] {}); + } catch (NoSuchMethodException ne) { + try { + Constructor c = pc.getConstructor(PARAMS1); + return (Permission) c.newInstance(new Object[] { name }); + } catch (NoSuchMethodException ne1) { + Constructor c = pc.getConstructor(PARAMS2); + return (Permission) c.newInstance(new Object[] { name, actions }); + } + } + } else { + if (name != null && actions == null) { + try { + Constructor c = pc.getConstructor(PARAMS1); + return (Permission) c.newInstance(new Object[] { name }); + } catch (NoSuchMethodException ne) { + Constructor c = pc.getConstructor(PARAMS2); + return (Permission) c.newInstance(new Object[] { name, actions }); + } + } else { + Constructor c = pc.getConstructor(PARAMS2); + return (Permission) c.newInstance(new Object[] { name, actions }); + } + } + } + + /** + * Creates one of the well-known permissions in the java.base module + * directly instead of via reflection. Keep list short to not penalize + * permissions from other modules. + */ + private static Permission getKnownPermission(Class claz, String name, String actions) { + if (claz.equals(FilePermission.class)) { + return new FilePermission(name, actions); + } else if (claz.equals(SocketPermission.class)) { + return new SocketPermission(name, actions); + } else if (claz.equals(RuntimePermission.class)) { + return new RuntimePermission(name, actions); + } else if (claz.equals(PropertyPermission.class)) { + return new PropertyPermission(name, actions); + } else if (claz.equals(NetPermission.class)) { + return new NetPermission(name, actions); + } else if (claz.equals(AllPermission.class)) { + return SecurityConstants.ALL_PERMISSION; + } else if (claz.equals(SecurityPermission.class)) { + return new SecurityPermission(name, actions); + } else { + return null; + } + } + + /** + * Creates one of the well-known principals in the java.base module + * directly instead of via reflection. Keep list short to not penalize + * principals from other modules. + */ + private static Principal getKnownPrincipal(Class claz, String name) { + if (claz.equals(X500Principal.class)) { + return new X500Principal(name); + } else { + return null; + } + } + + /** + * Fetch all certs associated with this alias. 
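+     *
+     * The aliases argument may list several keystore aliases separated by
+     * commas; certificates are returned only if every listed alias resolves,
+     * since a signedBy list is treated as a logical AND.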
+ */ + private Certificate[] getCertificates(KeyStore keyStore, String aliases, PolicyInfo newInfo) { + + List vcerts = null; + + StringTokenizer st = new StringTokenizer(aliases, ","); + int n = 0; + + while (st.hasMoreTokens()) { + String alias = st.nextToken().trim(); + n++; + Certificate cert = null; + // See if this alias's cert has already been cached + synchronized (newInfo.aliasMapping) { + cert = (Certificate) newInfo.aliasMapping.get(alias); + + if (cert == null && keyStore != null) { + + try { + cert = keyStore.getCertificate(alias); + } catch (KeyStoreException kse) { + // never happens, because keystore has already been loaded + // when we call this + } + if (cert != null) { + newInfo.aliasMapping.put(alias, cert); + newInfo.aliasMapping.put(cert, alias); + } + } + } + + if (cert != null) { + if (vcerts == null) vcerts = new ArrayList<>(); + vcerts.add(cert); + } + } + + // make sure n == vcerts.size, since we are doing a logical *and* + if (vcerts != null && n == vcerts.size()) { + Certificate[] certs = new Certificate[vcerts.size()]; + vcerts.toArray(certs); + return certs; + } else { + return null; + } + } + + /** + * Refreshes the policy object by re-reading all the policy files. + */ + @Override + public void refresh() { + init(url); + } + + /** + * Evaluates the global policy for the permissions granted to + * the ProtectionDomain and tests whether the permission is + * granted. + * + * @param pd the ProtectionDomain to test + * @param p the Permission object to be tested for implication. + * + * @return true if "permission" is a proper subset of a permission + * granted to this ProtectionDomain. + * + * @see java.security.ProtectionDomain + */ + @Override + public boolean implies(ProtectionDomain pd, Permission p) { + PermissionCollection pc = getPermissions(pd); + if (pc == null) { + return false; + } + + // cache mapping of protection domain to its PermissionCollection + return pc.implies(p); + } + + /** + * Examines this Policy and returns the permissions granted + * to the specified ProtectionDomain. This includes + * the permissions currently associated with the domain as well + * as the policy permissions granted to the domain's + * CodeSource, ClassLoader, and Principals. + * + *

Note that this Policy implementation has + * special handling for PrivateCredentialPermissions. + * When this method encounters a PrivateCredentialPermission + * which specifies "self" as the Principal class and name, + * it does not add that Permission to the returned + * PermissionCollection. Instead, it builds + * a new PrivateCredentialPermission + * for each Principal associated with the provided + * Subject. Each new PrivateCredentialPermission + * contains the same Credential class as specified in the + * originally granted permission, as well as the Class and name + * for the respective Principal. + * + * @param domain the Permissions granted to this + * ProtectionDomain are returned. + * + * @return the Permissions granted to the provided + * ProtectionDomain. + */ + @Override + public PermissionCollection getPermissions(ProtectionDomain domain) { + Permissions perms = new Permissions(); + + if (domain == null) return perms; + + // first get policy perms + getPermissions(perms, domain); + + // add static perms + // - adding static perms after policy perms is necessary + // to avoid a regression for 4301064 + PermissionCollection pc = domain.getPermissions(); + if (pc != null) { + synchronized (pc) { + Enumeration e = pc.elements(); + while (e.hasMoreElements()) { + perms.add(e.nextElement()); + } + } + } + + return perms; + } + + /** + * Examines this Policy and creates a PermissionCollection object with + * the set of permissions for the specified CodeSource. + * + * @param codesource the CodeSource associated with the caller. + * This encapsulates the original location of the code (where the code + * came from) and the public key(s) of its signer. + * + * @return the set of permissions according to the policy. + */ + @Override + public PermissionCollection getPermissions(CodeSource codesource) { + return getPermissions(new Permissions(), codesource); + } + + /** + * Examines the global policy and returns the provided Permissions + * object with additional permissions granted to the specified + * ProtectionDomain. + * + * @param perms the Permissions to populate + * @param pd the ProtectionDomain associated with the caller. + * + * @return the set of Permissions according to the policy. + */ + private PermissionCollection getPermissions(Permissions perms, ProtectionDomain pd) { + final CodeSource cs = pd.getCodeSource(); + if (cs == null) return perms; + + CodeSource canonCodeSource = canonicalizeCodebase(cs, true); + return getPermissions(perms, canonCodeSource, pd.getPrincipals()); + } + + /** + * Examines the global policy and returns the provided Permissions + * object with additional permissions granted to the specified + * CodeSource. + * + * @param perms the permissions to populate + * @param cs the codesource associated with the caller. + * This encapsulates the original location of the code (where the code + * came from) and the public key(s) of its signer. + * + * @return the set of permissions according to the policy. 
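+     *
+     * Principals are not considered on this code path, so only policy entries
+     * without principal clauses contribute permissions here.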
+ */ + private PermissionCollection getPermissions(Permissions perms, final CodeSource cs) { + + if (cs == null) return perms; + + CodeSource canonCodeSource = canonicalizeCodebase(cs, true); + return getPermissions(perms, canonCodeSource, null); + } + + private Permissions getPermissions(Permissions perms, final CodeSource cs, Principal[] principals) { + for (PolicyEntry entry : policyInfo.policyEntries) { + addPermissions(perms, cs, principals, entry); + } + + return perms; + } + + private void addPermissions(Permissions perms, final CodeSource cs, Principal[] principals, final PolicyEntry entry) { + + // check to see if the CodeSource implies + Boolean imp = entry.getCodeSource().implies(cs); + if (!imp.booleanValue()) { + // CodeSource does not imply - return and try next policy entry + return; + } + + // check to see if the Principals imply + + List entryPs = entry.getPrincipals(); + + if (entryPs == null || entryPs.isEmpty()) { + + // policy entry has no principals - + // add perms regardless of principals in current ACC + + addPerms(perms, principals, entry); + return; + + } else if (principals == null || principals.length == 0) { + + // current thread has no principals but this policy entry + // has principals - perms are not added + + return; + } + + // current thread has principals and this policy entry + // has principals. see if policy entry principals match + // principals in current ACC + + for (PolicyParser.PrincipalEntry pppe : entryPs) { + + // Check for wildcards + if (pppe.isWildcardClass()) { + // a wildcard class matches all principals in current ACC + continue; + } + + if (pppe.isWildcardName()) { + // a wildcard name matches any principal with the same class + if (wildcardPrincipalNameImplies(pppe.principalClass, principals)) { + continue; + } + // policy entry principal not in current ACC - + // immediately return and go to next policy entry + return; + } + + Set pSet = new HashSet<>(Arrays.asList(principals)); + Subject subject = new Subject(true, pSet, Collections.EMPTY_SET, Collections.EMPTY_SET); + try { + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + Class pClass = Class.forName(pppe.principalClass, false, cl); + Principal p = getKnownPrincipal(pClass, pppe.principalName); + if (p == null) { + if (!Principal.class.isAssignableFrom(pClass)) { + // not the right subtype + throw new ClassCastException(pppe.principalClass + " is not a Principal"); + } + + Constructor c = pClass.getConstructor(PARAMS1); + p = (Principal) c.newInstance(new Object[] { pppe.principalName }); + + } + + // check if the Principal implies the current + // thread's principals + if (!p.implies(subject)) { + // policy principal does not imply the current Subject - + // immediately return and go to next policy entry + return; + } + } catch (Exception e) { + // fall back to default principal comparison. + // see if policy entry principal is in current ACC + + if (!pppe.implies(subject)) { + // policy entry principal not in current ACC - + // immediately return and go to next policy entry + return; + } + } + + // either the principal information matched, + // or the Principal.implies succeeded. + // continue loop and test the next policy principal + } + + // all policy entry principals were found in the current ACC - + // grant the policy permissions + + addPerms(perms, principals, entry); + } + + /** + * Returns true if the array of principals contains at least one + * principal of the specified class. 
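+     * Used when a policy entry specifies a wildcard principal name for a
+     * concrete principal class.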
+ */ + private static boolean wildcardPrincipalNameImplies(String principalClass, Principal[] principals) { + for (Principal p : principals) { + if (principalClass.equals(p.getClass().getName())) { + return true; + } + } + return false; + } + + private void addPerms(Permissions perms, Principal[] accPs, PolicyEntry entry) { + for (int i = 0; i < entry.permissions.size(); i++) { + Permission p = entry.permissions.get(i); + + if (p instanceof SelfPermission) { + // handle "SELF" permissions + expandSelf((SelfPermission) p, entry.getPrincipals(), accPs, perms); + } else { + perms.add(p); + } + } + } + + /** + * @param sp the SelfPermission that needs to be expanded. + * + * @param entryPs list of principals for the Policy entry. + * + * @param pdp Principal array from the current ProtectionDomain. + * + * @param perms the PermissionCollection where the individual + * Permissions will be added after expansion. + */ + + private void expandSelf(SelfPermission sp, List entryPs, Principal[] pdp, Permissions perms) { + + if (entryPs == null || entryPs.isEmpty()) { + return; + } + int startIndex = 0; + int v; + StringBuilder sb = new StringBuilder(); + while ((v = sp.getSelfName().indexOf(SELF, startIndex)) != -1) { + + // add non-SELF string + sb.append(sp.getSelfName().substring(startIndex, v)); + + // expand SELF + Iterator pli = entryPs.iterator(); + while (pli.hasNext()) { + PolicyParser.PrincipalEntry pppe = pli.next(); + String[][] principalInfo = getPrincipalInfo(pppe, pdp); + for (int i = 0; i < principalInfo.length; i++) { + if (i != 0) { + sb.append(", "); + } + sb.append(principalInfo[i][0] + " " + "\"" + principalInfo[i][1] + "\""); + } + if (pli.hasNext()) { + sb.append(", "); + } + } + startIndex = v + SELF.length(); + } + // add remaining string (might be the entire string) + sb.append(sp.getSelfName().substring(startIndex)); + + try { + // first try to instantiate the permission + perms.add(getInstance(sp.getSelfType(), sb.toString(), sp.getSelfActions())); + } catch (ClassNotFoundException cnfe) { + // ok, the permission is not in the bootclasspath. + // before we add an UnresolvedPermission, check to see + // whether this perm already belongs to the collection. + // if so, use that perm's ClassLoader to create a new + // one. + Class pc = null; + synchronized (perms) { + Enumeration e = perms.elements(); + while (e.hasMoreElements()) { + Permission pElement = e.nextElement(); + if (pElement.getClass().getName().equals(sp.getSelfType())) { + pc = pElement.getClass(); + break; + } + } + } + if (pc == null) { + // create an UnresolvedPermission + perms.add(new UnresolvedPermission(sp.getSelfType(), sb.toString(), sp.getSelfActions(), sp.getCerts())); + } else { + try { + // we found an instantiated permission. + // use its class loader to instantiate a new permission. + Constructor c; + // name parameter can not be null + if (sp.getSelfActions() == null) { + try { + c = pc.getConstructor(PARAMS1); + perms.add((Permission) c.newInstance(new Object[] { sb.toString() })); + } catch (NoSuchMethodException ne) { + c = pc.getConstructor(PARAMS2); + perms.add((Permission) c.newInstance(new Object[] { sb.toString(), sp.getSelfActions() })); + } + } else { + c = pc.getConstructor(PARAMS2); + perms.add((Permission) c.newInstance(new Object[] { sb.toString(), sp.getSelfActions() })); + } + } catch (Exception nme) {} + } + } catch (Exception e) {} + } + + /** + * return the principal class/name pair in the 2D array. + * array[x][y]: x corresponds to the array length. 
+ * if (y == 0), it's the principal class. + * if (y == 1), it's the principal name. + */ + private String[][] getPrincipalInfo(PolicyParser.PrincipalEntry pe, Principal[] pdp) { + + // there are 3 possibilities: + // 1) the entry's Principal class and name are not wildcarded + // 2) the entry's Principal name is wildcarded only + // 3) the entry's Principal class and name are wildcarded + + if (!pe.isWildcardClass() && !pe.isWildcardName()) { + + // build an info array for the principal + // from the Policy entry + String[][] info = new String[1][2]; + info[0][0] = pe.principalClass; + info[0][1] = pe.principalName; + return info; + + } else if (!pe.isWildcardClass() && pe.isWildcardName()) { + + // build an info array for every principal + // in the current domain which has a principal class + // that is equal to policy entry principal class name + List plist = new ArrayList<>(); + for (int i = 0; i < pdp.length; i++) { + if (pe.principalClass.equals(pdp[i].getClass().getName())) plist.add(pdp[i]); + } + String[][] info = new String[plist.size()][2]; + int i = 0; + for (Principal p : plist) { + info[i][0] = p.getClass().getName(); + info[i][1] = p.getName(); + i++; + } + return info; + + } else { + + // build an info array for every + // one of the current Domain's principals + + String[][] info = new String[pdp.length][2]; + + for (int i = 0; i < pdp.length; i++) { + info[i][0] = pdp[i].getClass().getName(); + info[i][1] = pdp[i].getName(); + } + return info; + } + } + + /* + * Returns the signer certificates from the list of certificates + * associated with the given code source. + * + * The signer certificates are those certificates that were used + * to verify signed code originating from the codesource location. + * + * This method assumes that in the given code source, each signer + * certificate is followed by its supporting certificate chain + * (which may be empty), and that the signer certificate and its + * supporting certificate chain are ordered bottom-to-top + * (i.e., with the signer certificate first and the (root) certificate + * authority last). + */ + protected Certificate[] getSignerCertificates(CodeSource cs) { + Certificate[] certs = null; + if ((certs = cs.getCertificates()) == null) return null; + for (int i = 0; i < certs.length; i++) { + if (!(certs[i] instanceof X509Certificate)) return cs.getCertificates(); + } + + // Do we have to do anything? 
+ int i = 0; + int count = 0; + while (i < certs.length) { + count++; + while (((i + 1) < certs.length) + && ((X509Certificate) certs[i]).getIssuerX500Principal() + .equals(((X509Certificate) certs[i + 1]).getSubjectX500Principal())) { + i++; + } + i++; + } + if (count == certs.length) + // Done + return certs; + + List userCertList = new ArrayList<>(); + i = 0; + while (i < certs.length) { + userCertList.add(certs[i]); + while (((i + 1) < certs.length) + && ((X509Certificate) certs[i]).getIssuerX500Principal() + .equals(((X509Certificate) certs[i + 1]).getSubjectX500Principal())) { + i++; + } + i++; + } + Certificate[] userCerts = new Certificate[userCertList.size()]; + userCertList.toArray(userCerts); + return userCerts; + } + + private CodeSource canonicalizeCodebase(CodeSource cs, boolean extractSignerCerts) { + + String path = null; + + CodeSource canonCs = cs; + URL u = cs.getLocation(); + if (u != null) { + if (u.getProtocol().equals("jar")) { + // unwrap url embedded inside jar url + String spec = u.getFile(); + int separator = spec.indexOf("!/"); + if (separator != -1) { + try { + u = newURL(spec.substring(0, separator)); + } catch (MalformedURLException e) { + // Fail silently. In this case, url stays what + // it was above + } + } + } + if (u.getProtocol().equals("file")) { + boolean isLocalFile = false; + String host = u.getHost(); + isLocalFile = (host == null || host.isEmpty() || host.equals("~") || host.equalsIgnoreCase("localhost")); + + if (isLocalFile) { + path = u.getFile().replace('/', File.separatorChar); + path = ParseUtil.decode(path); + } + } + } + + if (path != null) { + try { + URL csUrl = null; + path = canonPath(path); + csUrl = ParseUtil.fileToEncodedURL(new File(path)); + + if (extractSignerCerts) { + canonCs = new CodeSource(csUrl, getSignerCertificates(cs)); + } else { + canonCs = new CodeSource(csUrl, cs.getCertificates()); + } + } catch (IOException ioe) { + // leave codesource as it is, unless we have to extract its + // signer certificates + if (extractSignerCerts) { + canonCs = new CodeSource(cs.getLocation(), getSignerCertificates(cs)); + } + } + } else { + if (extractSignerCerts) { + canonCs = new CodeSource(cs.getLocation(), getSignerCertificates(cs)); + } + } + return canonCs; + } + + // Wrapper to return a canonical path that avoids calling getCanonicalPath() + // with paths that are intended to match all entries in the directory + private static String canonPath(String path) throws IOException { + if (path.endsWith("*")) { + path = path.substring(0, path.length() - 1) + "-"; + path = new File(path).getCanonicalPath(); + return path.substring(0, path.length() - 1) + "*"; + } else { + return new File(path).getCanonicalPath(); + } + } + + /** + * return true if no replacement was performed, + * or if replacement succeeded. 
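+     *
+     * Replacement resolves a keystore alias named in a principal entry to the
+     * X.500 distinguished name of the certificate stored under that alias.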
+ */ + private boolean replacePrincipals(List principals, KeyStore keystore) { + + if (principals == null || principals.isEmpty() || keystore == null) return true; + + for (PolicyParser.PrincipalEntry pppe : principals) { + if (pppe.isReplaceName()) { + + // perform replacement + // (only X509 replacement is possible now) + String name; + if ((name = getDN(pppe.principalName, keystore)) == null) { + return false; + } + + pppe.principalClass = X500PRINCIPAL; + pppe.principalName = name; + } + } + // return true if no replacement was performed, + // or if replacement succeeded + return true; + } + + private void expandPermissionName(PolicyParser.PermissionEntry pe, KeyStore keystore) throws Exception { + // short cut the common case + if (pe.name == null || pe.name.indexOf("${{", 0) == -1) { + return; + } + + int startIndex = 0; + int b, e; + StringBuilder sb = new StringBuilder(); + while ((b = pe.name.indexOf("${{", startIndex)) != -1) { + e = pe.name.indexOf("}}", b); + if (e < 1) { + break; + } + sb.append(pe.name.substring(startIndex, b)); + + // get the value in ${{...}} + String value = pe.name.substring(b + 3, e); + + // parse up to the first ':' + int colonIndex; + String prefix = value; + String suffix; + if ((colonIndex = value.indexOf(':')) != -1) { + prefix = value.substring(0, colonIndex); + } + + // handle different prefix possibilities + if (prefix.equalsIgnoreCase("self")) { + // do nothing - handled later + sb.append(pe.name.substring(b, e + 2)); + startIndex = e + 2; + continue; + } else if (prefix.equalsIgnoreCase("alias")) { + // get the suffix and perform keystore alias replacement + if (colonIndex == -1) { + throw new Exception("Alias name not provided pe.name: " + pe.name); + } + suffix = value.substring(colonIndex + 1); + if ((suffix = getDN(suffix, keystore)) == null) { + throw new Exception("Unable to perform substitution on alias suffix: " + value.substring(colonIndex + 1)); + } + + sb.append(X500PRINCIPAL + " \"" + suffix + "\""); + startIndex = e + 2; + } else { + throw new Exception("Substitution value prefix unsupported: " + prefix); + } + } + + // copy the rest of the value + sb.append(pe.name.substring(startIndex)); + + pe.name = sb.toString(); + } + + private String getDN(String alias, KeyStore keystore) { + Certificate cert = null; + try { + cert = keystore.getCertificate(alias); + } catch (Exception e) { + return null; + } + + if (!(cert instanceof X509Certificate x509Cert)) { + return null; + } else { + // 4702543: X500 names with an EmailAddress + // were encoded incorrectly. create new + // X500Principal name with correct encoding + + X500Principal p = new X500Principal(x509Cert.getSubjectX500Principal().toString()); + return p.getName(); + } + } + + /** + * Each entry in the policy configuration file is represented by a + * PolicyEntry object.

+ * + * A PolicyEntry is a (CodeSource,Permission) pair. The + * CodeSource contains the (URL, PublicKey) that together identify + * where the Java bytecodes come from and who (if anyone) signed + * them. The URL could refer to localhost. The URL could also be + * null, meaning that this policy entry is given to all comers, as + * long as they match the signer field. The signer could be null, + * meaning the code is not signed.

+ * + * The Permission contains the (Type, Name, Action) triplet.

+ * + * For now, the Policy object retrieves the public key from the + * X.509 certificate on disk that corresponds to the signedBy + * alias specified in the Policy config file. For reasons of + * efficiency, the Policy object keeps a hashtable of certs already + * read in. This could be replaced by a secure internal key + * store. + * + *

+ * For example, the entry + *

+     *          permission java.io.File "/tmp", "read,write",
+     *          signedBy "Duke";
+     * 
+ * is represented internally + *
+     *
+     * FilePermission f = new FilePermission("/tmp", "read,write");
+     * PublicKey p = publickeys.get("Duke");
+     * URL u = InetAddress.getLocalHost();
+     * CodeBase c = new CodeBase( p, u );
+     * pe = new PolicyEntry(f, c);
+     * 
+ * + * @author Marianne Mueller + * @author Roland Schemers + * @see java.security.CodeSource + * @see java.security.Policy + * @see java.security.Permissions + * @see java.security.ProtectionDomain + */ + private static class PolicyEntry { + + private final CodeSource codesource; + final List permissions; + private final List principals; + + /** + * Given a Permission and a CodeSource, create a policy entry. + * + * XXX Decide if/how to add validity fields and "purpose" fields to + * XXX policy entries + * + * @param cs the CodeSource, which encapsulates the URL and the + * public key + * attributes from the policy config file. Validity checks + * are performed on the public key before PolicyEntry is + * called. + * + */ + PolicyEntry(CodeSource cs, List principals) { + this.codesource = cs; + this.permissions = new ArrayList(); + this.principals = principals; // can be null + } + + PolicyEntry(CodeSource cs) { + this(cs, null); + } + + List getPrincipals() { + return principals; // can be null + } + + /** + * add a Permission object to this entry. + * No need to sync add op because perms are added to entry only + * while entry is being initialized + */ + void add(Permission p) { + permissions.add(p); + } + + /** + * Return the CodeSource for this policy entry + */ + CodeSource getCodeSource() { + return codesource; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("{"); + sb.append(getCodeSource()); + sb.append("\n"); + for (int j = 0; j < permissions.size(); j++) { + Permission p = permissions.get(j); + sb.append(" "); + sb.append(" "); + sb.append(p); + sb.append("\n"); + } + sb.append("}"); + sb.append("\n"); + return sb.toString(); + } + } + + private static class SelfPermission extends Permission { + + @java.io.Serial + private static final long serialVersionUID = -8315562579967246806L; + + /** + * The class name of the Permission class that will be + * created when this self permission is expanded . + * + * @serial + */ + private String type; + + /** + * The permission name. + * + * @serial + */ + private String name; + + /** + * The actions of the permission. + * + * @serial + */ + private String actions; + + /** + * The certs of the permission. + * + * @serial + */ + private Certificate[] certs; + + /** + * Creates a new SelfPermission containing the permission + * information needed later to expand the self + * @param type the class name of the Permission class that will be + * created when this permission is expanded and if necessary resolved. + * @param name the name of the permission. + * @param actions the actions of the permission. + * @param certs the certificates the permission's class was signed with. + * This is a list of certificate chains, where each chain is composed of + * a signer certificate and optionally its supporting certificate chain. + * Each chain is ordered bottom-to-top (i.e., with the signer + * certificate first and the (root) certificate authority last). + */ + public SelfPermission(String type, String name, String actions, Certificate[] certs) { + super(type); + if (type == null) { + throw new NullPointerException("Ttype cannot be null"); + } + this.type = type; + this.name = name; + this.actions = actions; + if (certs != null) { + // Extract the signer certs from the list of certificates. 
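+                // If any entry is not an X.509 certificate, the whole array is
+                // kept as-is; otherwise chains are detected by matching each
+                // certificate's issuer to the next certificate's subject, and
+                // only the first certificate of each chain (the signer) is kept.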
+ for (int i = 0; i < certs.length; i++) { + if (!(certs[i] instanceof X509Certificate)) { + // there is no concept of signer certs, so we store the + // entire cert array + this.certs = certs.clone(); + break; + } + } + + if (this.certs == null) { + // Go through the list of certs and see if all the certs are + // signer certs. + int i = 0; + int count = 0; + while (i < certs.length) { + count++; + while (((i + 1) < certs.length) + && ((X509Certificate) certs[i]).getIssuerX500Principal() + .equals(((X509Certificate) certs[i + 1]).getSubjectX500Principal())) { + i++; + } + i++; + } + if (count == certs.length) { + // All the certs are signer certs, so we store the + // entire array + this.certs = certs.clone(); + } + + if (this.certs == null) { + // extract the signer certs + List signerCerts = new ArrayList<>(); + i = 0; + while (i < certs.length) { + signerCerts.add(certs[i]); + while (((i + 1) < certs.length) + && ((X509Certificate) certs[i]).getIssuerX500Principal() + .equals(((X509Certificate) certs[i + 1]).getSubjectX500Principal())) { + i++; + } + i++; + } + this.certs = new Certificate[signerCerts.size()]; + signerCerts.toArray(this.certs); + } + } + } + } + + /** + * This method always returns false for SelfPermission permissions. + * That is, an SelfPermission never considered to + * imply another permission. + * + * @param p the permission to check against. + * + * @return false. + */ + @Override + public boolean implies(Permission p) { + return false; + } + + /** + * Checks two SelfPermission objects for equality. + * + * Checks that obj is an SelfPermission, and has + * the same type (class) name, permission name, actions, and + * certificates as this object. + * + * @param obj the object we are testing for equality with this object. + * + * @return true if obj is an SelfPermission, and has the same + * type (class) name, permission name, actions, and + * certificates as this object. + */ + @Override + public boolean equals(Object obj) { + if (obj == this) return true; + + if (!(obj instanceof SelfPermission)) return false; + SelfPermission that = (SelfPermission) obj; + + if (!(this.type.equals(that.type) && this.name.equals(that.name) && this.actions.equals(that.actions))) return false; + + if ((this.certs == null) && (that.certs == null)) { + return true; + } + + if ((this.certs == null) || (that.certs == null)) { + return false; + } + + if (this.certs.length != that.certs.length) { + return false; + } + + int i, j; + boolean match; + + for (i = 0; i < this.certs.length; i++) { + match = false; + for (j = 0; j < that.certs.length; j++) { + if (this.certs[i].equals(that.certs[j])) { + match = true; + break; + } + } + if (!match) return false; + } + + for (i = 0; i < that.certs.length; i++) { + match = false; + for (j = 0; j < this.certs.length; j++) { + if (that.certs[i].equals(this.certs[j])) { + match = true; + break; + } + } + if (!match) return false; + } + return true; + } + + /** + * Returns the hash code value for this object. + * + * @return a hash code value for this object. + */ + @Override + public int hashCode() { + int hash = type.hashCode(); + if (name != null) hash ^= name.hashCode(); + if (actions != null) hash ^= actions.hashCode(); + return hash; + } + + /** + * Returns the canonical string representation of the actions, + * which currently is the empty string "", since there are no actions + * for an SelfPermission. 
That is, the actions for the + * permission that will be created when this SelfPermission + * is resolved may be non-null, but an SelfPermission + * itself is never considered to have any actions. + * + * @return the empty string "". + */ + @Override + public String getActions() { + return ""; + } + + public String getSelfType() { + return type; + } + + public String getSelfName() { + return name; + } + + public String getSelfActions() { + return actions; + } + + public Certificate[] getCerts() { + return (certs == null ? null : certs.clone()); + } + + /** + * Returns a string describing this SelfPermission. The convention + * is to specify the class name, the permission name, and the actions, + * in the following format: '(unresolved "ClassName" "name" "actions")'. + * + * @return information about this SelfPermission. + */ + @Override + public String toString() { + return "(SelfPermission " + type + " " + name + " " + actions + ")"; + } + + /** + * Restores the state of this object from the stream. + * + * @param stream the {@code ObjectInputStream} from which data is read + * @throws IOException if an I/O error occurs + * @throws ClassNotFoundException if a serialized class cannot be loaded + */ + @java.io.Serial + private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException { + stream.defaultReadObject(); + if (certs != null) { + this.certs = certs.clone(); + } + } + } + + /** + * holds policy information that we need to synch on + */ + private static class PolicyInfo { + // Stores grant entries in the policy + final List policyEntries; + + // Maps aliases to certs + final Map aliasMapping; + + PolicyInfo(int numCaches) { + policyEntries = new ArrayList<>(); + aliasMapping = Collections.synchronizedMap(new HashMap<>(11)); + } + } + + @SuppressWarnings("deprecation") + private static URL newURL(String spec) throws MalformedURLException { + return new URL(spec); + } +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java new file mode 100644 index 0000000000000..9d5b0d5a13722 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java @@ -0,0 +1,1163 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm.policy; + +import javax.security.auth.x500.X500Principal; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.Reader; +import java.io.StreamTokenizer; +import java.io.Writer; +import java.security.GeneralSecurityException; +import java.security.Principal; +import java.util.Collection; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Locale; +import java.util.Map; +import java.util.StringTokenizer; +import java.util.TreeMap; +import java.util.Vector; + +/** + * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/provider/PolicyParser.java + */ +public class PolicyParser { + + private final Vector grantEntries; + private Map domainEntries; + + private StreamTokenizer st; + private int lookahead; + private boolean expandProp = false; + private String keyStoreUrlString = null; // unexpanded + private String keyStoreType = null; + private String keyStoreProvider = null; + private String storePassURL = null; + + private String expand(String value) throws PropertyExpander.ExpandException { + return expand(value, false); + } + + private String expand(String value, boolean encodeURL) throws PropertyExpander.ExpandException { + if (!expandProp) { + return value; + } else { + return PropertyExpander.expand(value, encodeURL); + } + } + + /** + * Creates a PolicyParser object. + */ + + public PolicyParser() { + grantEntries = new Vector<>(); + } + + public PolicyParser(boolean expandProp) { + this(); + this.expandProp = expandProp; + } + + /** + * Reads a policy configuration into the Policy object using a + * Reader object. + * + * @param policy the policy Reader object. + * + * @exception ParsingException if the policy configuration contains + * a syntax error. + * + * @exception IOException if an error occurs while reading the policy + * configuration. + */ + + public void read(Reader policy) throws ParsingException, IOException { + if (!(policy instanceof BufferedReader)) { + policy = new BufferedReader(policy); + } + + /* + * Configure the stream tokenizer: + * Recognize strings between "..." + * Don't convert words to lowercase + * Recognize both C-style and C++-style comments + * Treat end-of-line as white space, not as a token + */ + st = new StreamTokenizer(policy); + + st.resetSyntax(); + st.wordChars('a', 'z'); + st.wordChars('A', 'Z'); + st.wordChars('.', '.'); + st.wordChars('0', '9'); + st.wordChars('_', '_'); + st.wordChars('$', '$'); + st.wordChars(128 + 32, 255); + st.whitespaceChars(0, ' '); + st.commentChar('/'); + st.quoteChar('\''); + st.quoteChar('"'); + st.lowerCaseMode(false); + st.ordinaryChar('/'); + st.slashSlashComments(true); + st.slashStarComments(true); + + /* + * The main parsing loop. The loop is executed once + * for each entry in the config file. The entries + * are delimited by semicolons. Once we've read in + * the information for an entry, go ahead and try to + * add it to the policy vector. 
+ * + */ + + lookahead = st.nextToken(); + GrantEntry ge = null; + while (lookahead != StreamTokenizer.TT_EOF) { + if (peek("grant")) { + ge = parseGrantEntry(); + // could be null if we couldn't expand a property + if (ge != null) add(ge); + } else if (peek("keystore") && keyStoreUrlString == null) { + // only one keystore entry per policy file, others will be + // ignored + parseKeyStoreEntry(); + } else if (peek("keystorePasswordURL") && storePassURL == null) { + // only one keystore passwordURL per policy file, others will be + // ignored + parseStorePassURL(); + } else if (ge == null && keyStoreUrlString == null && storePassURL == null && peek("domain")) { + if (domainEntries == null) { + domainEntries = new TreeMap<>(); + } + DomainEntry de = parseDomainEntry(); + String domainName = de.getName(); + if (domainEntries.putIfAbsent(domainName, de) != null) { + Object[] source = { domainName }; + String msg = "duplicate keystore domain name: " + domainName; + throw new ParsingException(msg, source); + } + } else { + // error? + } + match(";"); + } + + if (keyStoreUrlString == null && storePassURL != null) { + throw new ParsingException("Keystore Password URL cannot be specified without also specifying keystore"); + } + } + + public void add(GrantEntry ge) { + grantEntries.addElement(ge); + } + + public void replace(GrantEntry origGe, GrantEntry newGe) { + grantEntries.setElementAt(newGe, grantEntries.indexOf(origGe)); + } + + public boolean remove(GrantEntry ge) { + return grantEntries.removeElement(ge); + } + + /** + * Returns the (possibly expanded) keystore location, or null if the + * expansion fails. + */ + public String getKeyStoreUrl() { + try { + if (keyStoreUrlString != null && keyStoreUrlString.length() != 0) { + return expand(keyStoreUrlString, true).replace(File.separatorChar, '/'); + } + } catch (PropertyExpander.ExpandException peee) { + return null; + } + return null; + } + + public void setKeyStoreUrl(String url) { + keyStoreUrlString = url; + } + + public String getKeyStoreType() { + return keyStoreType; + } + + public void setKeyStoreType(String type) { + keyStoreType = type; + } + + public String getKeyStoreProvider() { + return keyStoreProvider; + } + + public void setKeyStoreProvider(String provider) { + keyStoreProvider = provider; + } + + public String getStorePassURL() { + try { + if (storePassURL != null && storePassURL.length() != 0) { + return expand(storePassURL, true).replace(File.separatorChar, '/'); + } + } catch (PropertyExpander.ExpandException peee) { + return null; + } + return null; + } + + public void setStorePassURL(String storePassURL) { + this.storePassURL = storePassURL; + } + + /** + * Enumerate all the entries in the global policy object. + * This method is used by policy admin tools. The tools + * should use the Enumeration methods on the returned object + * to fetch the elements sequentially. 
+ */ + public Enumeration grantElements() { + return grantEntries.elements(); + } + + public Collection getDomainEntries() { + return domainEntries.values(); + } + + /** + * write out the policy + */ + + public void write(Writer policy) { + PrintWriter out = new PrintWriter(new BufferedWriter(policy)); + + out.println("/* AUTOMATICALLY GENERATED ON " + (new java.util.Date()) + "*/"); + out.println("/* DO NOT EDIT */"); + out.println(); + + // write the (unexpanded) keystore entry as the first entry of the + // policy file + if (keyStoreUrlString != null) { + writeKeyStoreEntry(out); + } + if (storePassURL != null) { + writeStorePassURL(out); + } + + // write "grant" entries + for (GrantEntry ge : grantEntries) { + ge.write(out); + out.println(); + } + out.flush(); + } + + /** + * parses a keystore entry + */ + private void parseKeyStoreEntry() throws ParsingException, IOException { + match("keystore"); + keyStoreUrlString = match("quoted string"); + + // parse keystore type + if (!peek(",")) { + return; // default type + } + match(","); + + if (peek("\"")) { + keyStoreType = match("quoted string"); + } else { + throw new ParsingException(st.lineno(), "Expected keystore type"); + } + + // parse keystore provider + if (!peek(",")) { + return; // provider optional + } + match(","); + + if (peek("\"")) { + keyStoreProvider = match("quoted string"); + } else { + throw new ParsingException(st.lineno(), "Keystore provider expected"); + } + } + + private void parseStorePassURL() throws ParsingException, IOException { + match("keyStorePasswordURL"); + storePassURL = match("quoted string"); + } + + /** + * writes the (unexpanded) keystore entry + */ + private void writeKeyStoreEntry(PrintWriter out) { + out.print("keystore \""); + out.print(keyStoreUrlString); + out.print('"'); + if (keyStoreType != null && !keyStoreType.isEmpty()) out.print(", \"" + keyStoreType + "\""); + if (keyStoreProvider != null && !keyStoreProvider.isEmpty()) out.print(", \"" + keyStoreProvider + "\""); + out.println(";"); + out.println(); + } + + private void writeStorePassURL(PrintWriter out) { + out.print("keystorePasswordURL \""); + out.print(storePassURL); + out.print('"'); + out.println(";"); + out.println(); + } + + /** + * parse a Grant entry + */ + private GrantEntry parseGrantEntry() throws ParsingException, IOException { + GrantEntry e = new GrantEntry(); + LinkedList principals = null; + boolean ignoreEntry = false; + + match("grant"); + + while (!peek("{")) { + + if (peekAndMatch("Codebase")) { + if (e.codeBase != null) throw new ParsingException(st.lineno(), "Multiple Codebase expressions"); + e.codeBase = match("quoted string"); + peekAndMatch(","); + } else if (peekAndMatch("SignedBy")) { + if (e.signedBy != null) throw new ParsingException(st.lineno(), "Multiple SignedBy expressions"); + e.signedBy = match("quoted string"); + + // verify syntax of the aliases + StringTokenizer aliases = new StringTokenizer(e.signedBy, ",", true); + int actr = 0; + int cctr = 0; + while (aliases.hasMoreTokens()) { + String alias = aliases.nextToken().trim(); + if (alias.equals(",")) cctr++; + else if (!alias.isEmpty()) actr++; + } + if (actr <= cctr) throw new ParsingException(st.lineno(), "SignedBy has an empty alias"); + + peekAndMatch(","); + } else if (peekAndMatch("Principal")) { + if (principals == null) { + principals = new LinkedList<>(); + } + + String principalClass; + String principalName; + + if (peek("\"")) { + // both the principalClass and principalName + // will be replaced later + principalClass = 
PrincipalEntry.REPLACE_NAME; + principalName = match("principal type"); + } else { + // check for principalClass wildcard + if (peek("*")) { + match("*"); + principalClass = PrincipalEntry.WILDCARD_CLASS; + } else { + principalClass = match("principal type"); + } + + // check for principalName wildcard + if (peek("*")) { + match("*"); + principalName = PrincipalEntry.WILDCARD_NAME; + } else { + principalName = match("quoted string"); + } + + // disallow WILDCARD_CLASS && actual name + if (principalClass.equals(PrincipalEntry.WILDCARD_CLASS) && !principalName.equals(PrincipalEntry.WILDCARD_NAME)) { + throw new ParsingException(st.lineno(), "Cannot specify Principal with a wildcard class without a wildcard name"); + } + } + + try { + principalName = expand(principalName); + + if (principalClass.equals("javax.security.auth.x500.X500Principal") + && !principalName.equals(PrincipalEntry.WILDCARD_NAME)) { + + // 4702543: X500 names with an EmailAddress + // were encoded incorrectly. construct a new + // X500Principal with correct encoding. + + X500Principal p = new X500Principal((new X500Principal(principalName)).toString()); + principalName = p.getName(); + } + + principals.add(new PrincipalEntry(principalClass, principalName)); + } catch (PropertyExpander.ExpandException peee) { + ignoreEntry = true; + } + peekAndMatch(","); + + } else { + throw new ParsingException(st.lineno(), "Expected codeBase or SignedBy or Principal"); + } + } + + if (principals != null) e.principals = principals; + match("{"); + + while (!peek("}")) { + if (peek("Permission")) { + try { + PermissionEntry pe = parsePermissionEntry(); + e.add(pe); + } catch (PropertyExpander.ExpandException peee) { + skipEntry(); // BugId 4219343 + } + match(";"); + } else { + throw new ParsingException(st.lineno(), "Expected permission entry"); + } + } + match("}"); + + try { + if (e.signedBy != null) e.signedBy = expand(e.signedBy); + if (e.codeBase != null) { + e.codeBase = expand(e.codeBase, true).replace(File.separatorChar, '/'); + } + } catch (PropertyExpander.ExpandException peee) { + return null; + } + + return (ignoreEntry) ? 
null : e; + } + + /** + * parse a Permission entry + */ + private PermissionEntry parsePermissionEntry() throws ParsingException, IOException, PropertyExpander.ExpandException { + PermissionEntry e = new PermissionEntry(); + + // Permission + match("Permission"); + e.permission = match("permission type"); + + if (peek("\"")) { + // Permission name + e.name = expand(match("quoted string")); + } + + if (!peek(",")) { + return e; + } + match(","); + + if (peek("\"")) { + e.action = expand(match("quoted string")); + if (!peek(",")) { + return e; + } + match(","); + } + + if (peekAndMatch("SignedBy")) { + e.signedBy = expand(match("quoted string")); + } + return e; + } + + /** + * parse a domain entry + */ + private DomainEntry parseDomainEntry() throws ParsingException, IOException { + DomainEntry domainEntry; + String name; + Map properties = new HashMap<>(); + + match("domain"); + name = match("domain name"); + + while (!peek("{")) { + // get the domain properties + properties = parseProperties("{"); + } + match("{"); + domainEntry = new DomainEntry(name, properties); + + while (!peek("}")) { + + match("keystore"); + name = match("keystore name"); + // get the keystore properties + if (!peek("}")) { + properties = parseProperties(";"); + } + match(";"); + domainEntry.add(new KeyStoreEntry(name, properties)); + } + match("}"); + + return domainEntry; + } + + /* + * Return a collection of domain properties or keystore properties. + */ + private Map parseProperties(String terminator) throws ParsingException, IOException { + + Map properties = new HashMap<>(); + String key; + String value; + while (!peek(terminator)) { + key = match("property name"); + match("="); + + try { + value = expand(match("quoted string")); + } catch (PropertyExpander.ExpandException peee) { + throw new IOException(peee.getLocalizedMessage()); + } + properties.put(key.toLowerCase(Locale.ENGLISH), value); + } + + return properties; + } + + private boolean peekAndMatch(String expect) throws ParsingException, IOException { + if (peek(expect)) { + match(expect); + return true; + } else { + return false; + } + } + + private boolean peek(String expect) { + boolean found = false; + + switch (lookahead) { + + case StreamTokenizer.TT_WORD: + if (expect.equalsIgnoreCase(st.sval)) found = true; + break; + case ',': + if (expect.equalsIgnoreCase(",")) found = true; + break; + case '{': + if (expect.equalsIgnoreCase("{")) found = true; + break; + case '}': + if (expect.equalsIgnoreCase("}")) found = true; + break; + case '"': + if (expect.equalsIgnoreCase("\"")) found = true; + break; + case '*': + if (expect.equalsIgnoreCase("*")) found = true; + break; + case ';': + if (expect.equalsIgnoreCase(";")) found = true; + break; + default: + + } + return found; + } + + private String match(String expect) throws ParsingException, IOException { + String value = null; + + switch (lookahead) { + case StreamTokenizer.TT_NUMBER: + throw new ParsingException(st.lineno(), expect); + case StreamTokenizer.TT_EOF: + Object[] source = { expect }; + String msg = "expected [" + expect + "], read [end of file]"; + throw new ParsingException(msg, source); + case StreamTokenizer.TT_WORD: + if (expect.equalsIgnoreCase(st.sval)) { + lookahead = st.nextToken(); + } else if (expect.equalsIgnoreCase("permission type")) { + value = st.sval; + lookahead = st.nextToken(); + } else if (expect.equalsIgnoreCase("principal type")) { + value = st.sval; + lookahead = st.nextToken(); + } else if (expect.equalsIgnoreCase("domain name") + || 
expect.equalsIgnoreCase("keystore name") + || expect.equalsIgnoreCase("property name")) { + value = st.sval; + lookahead = st.nextToken(); + } else { + throw new ParsingException(st.lineno(), expect, st.sval); + } + break; + case '"': + if (expect.equalsIgnoreCase("quoted string")) { + value = st.sval; + lookahead = st.nextToken(); + } else if (expect.equalsIgnoreCase("permission type")) { + value = st.sval; + lookahead = st.nextToken(); + } else if (expect.equalsIgnoreCase("principal type")) { + value = st.sval; + lookahead = st.nextToken(); + } else { + throw new ParsingException(st.lineno(), expect, st.sval); + } + break; + case ',': + if (expect.equalsIgnoreCase(",")) lookahead = st.nextToken(); + else throw new ParsingException(st.lineno(), expect, ","); + break; + case '{': + if (expect.equalsIgnoreCase("{")) lookahead = st.nextToken(); + else throw new ParsingException(st.lineno(), expect, "{"); + break; + case '}': + if (expect.equalsIgnoreCase("}")) lookahead = st.nextToken(); + else throw new ParsingException(st.lineno(), expect, "}"); + break; + case ';': + if (expect.equalsIgnoreCase(";")) lookahead = st.nextToken(); + else throw new ParsingException(st.lineno(), expect, ";"); + break; + case '*': + if (expect.equalsIgnoreCase("*")) lookahead = st.nextToken(); + else throw new ParsingException(st.lineno(), expect, "*"); + break; + case '=': + if (expect.equalsIgnoreCase("=")) lookahead = st.nextToken(); + else throw new ParsingException(st.lineno(), expect, "="); + break; + default: + throw new ParsingException(st.lineno(), expect, String.valueOf((char) lookahead)); + } + return value; + } + + /** + * skip all tokens for this entry leaving the delimiter ";" + * in the stream. + */ + private void skipEntry() throws ParsingException, IOException { + while (lookahead != ';') { + switch (lookahead) { + case StreamTokenizer.TT_NUMBER: + throw new ParsingException(st.lineno(), ";"); + case StreamTokenizer.TT_EOF: + throw new ParsingException("Expected read end of file"); + default: + lookahead = st.nextToken(); + } + } + } + + /** + * Each grant entry in the policy configuration file is + * represented by a GrantEntry object. + * + *

+     * For example, the entry
+     * <pre>

+     *      grant signedBy "Duke" {
+     *          permission java.io.FilePermission "/tmp", "read,write";
+     *      };
+     *
+     * </pre>
+     * is represented internally
+     * <pre>
+     *
+     * pe = new PermissionEntry("java.io.FilePermission",
+     *                           "/tmp", "read,write");
+     *
+     * ge = new GrantEntry("Duke", null);
+     *
+     * ge.add(pe);
+     *
+     * </pre>
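+     *
+     * As a further illustration (this example is not taken from the original
+     * sources; the codeBase URL and principal name are made up), a grant entry
+     * may also name a codeBase and principals:
+     * <pre>
+     *      grant codeBase "file:/tmp/app/-",
+     *          principal javax.security.auth.x500.X500Principal "cn=Duke" {
+     *          permission java.io.FilePermission "/tmp", "read";
+     *      };
+     * </pre>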
+ * + * @author Roland Schemers + * + * version 1.19, 05/21/98 + */ + + public static class GrantEntry { + + public String signedBy; + public String codeBase; + public LinkedList principals; + public Vector permissionEntries; + + public GrantEntry() { + principals = new LinkedList<>(); + permissionEntries = new Vector<>(); + } + + public GrantEntry(String signedBy, String codeBase) { + this.codeBase = codeBase; + this.signedBy = signedBy; + principals = new LinkedList<>(); + permissionEntries = new Vector<>(); + } + + public void add(PermissionEntry pe) { + permissionEntries.addElement(pe); + } + + public boolean remove(PrincipalEntry pe) { + return principals.remove(pe); + } + + public boolean remove(PermissionEntry pe) { + return permissionEntries.removeElement(pe); + } + + public boolean contains(PrincipalEntry pe) { + return principals.contains(pe); + } + + public boolean contains(PermissionEntry pe) { + return permissionEntries.contains(pe); + } + + /** + * Enumerate all the permission entries in this GrantEntry. + */ + public Enumeration permissionElements() { + return permissionEntries.elements(); + } + + public void write(PrintWriter out) { + out.print("grant"); + if (signedBy != null) { + out.print(" signedBy \""); + out.print(signedBy); + out.print('"'); + if (codeBase != null) out.print(", "); + } + if (codeBase != null) { + out.print(" codeBase \""); + out.print(codeBase); + out.print('"'); + if (principals != null && principals.size() > 0) out.print(",\n"); + } + if (principals != null && principals.size() > 0) { + Iterator pli = principals.iterator(); + while (pli.hasNext()) { + out.print(" "); + PrincipalEntry pe = pli.next(); + pe.write(out); + if (pli.hasNext()) out.print(",\n"); + } + } + out.println(" {"); + for (PermissionEntry pe : permissionEntries) { + out.write(" "); + pe.write(out); + } + out.println("};"); + } + + public Object clone() { + GrantEntry ge = new GrantEntry(); + ge.codeBase = this.codeBase; + ge.signedBy = this.signedBy; + ge.principals = new LinkedList<>(this.principals); + ge.permissionEntries = new Vector<>(this.permissionEntries); + return ge; + } + } + + /** + * Principal info (class and name) in a grant entry + */ + public static class PrincipalEntry implements Principal { + + public static final String WILDCARD_CLASS = "WILDCARD_PRINCIPAL_CLASS"; + public static final String WILDCARD_NAME = "WILDCARD_PRINCIPAL_NAME"; + public static final String REPLACE_NAME = "PolicyParser.REPLACE_NAME"; + + String principalClass; + String principalName; + + /** + * A PrincipalEntry consists of the Principal class and Principal name. 
+ * + * @param principalClass the Principal class + * @param principalName the Principal name + * @throws NullPointerException if principalClass or principalName + * are null + */ + public PrincipalEntry(String principalClass, String principalName) { + if (principalClass == null || principalName == null) throw new NullPointerException("principalClass or principalName is null"); + this.principalClass = principalClass; + this.principalName = principalName; + } + + boolean isWildcardName() { + return principalName.equals(WILDCARD_NAME); + } + + boolean isWildcardClass() { + return principalClass.equals(WILDCARD_CLASS); + } + + boolean isReplaceName() { + return principalClass.equals(REPLACE_NAME); + } + + public String getPrincipalClass() { + return principalClass; + } + + public String getPrincipalName() { + return principalName; + } + + public String getDisplayClass() { + if (isWildcardClass()) { + return "*"; + } else if (isReplaceName()) { + return ""; + } else return principalClass; + } + + public String getDisplayName() { + return getDisplayName(false); + } + + public String getDisplayName(boolean addQuote) { + if (isWildcardName()) { + return "*"; + } else { + if (addQuote) return "\"" + principalName + "\""; + else return principalName; + } + } + + @Override + public String getName() { + return principalName; + } + + @Override + public String toString() { + if (!isReplaceName()) { + return getDisplayClass() + "/" + getDisplayName(); + } else { + return getDisplayName(); + } + } + + /** + * Test for equality between the specified object and this object. + * Two PrincipalEntries are equal if their class and name values + * are equal. + * + * @param obj the object to test for equality with this object + * @return true if the objects are equal, false otherwise + */ + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + + if (!(obj instanceof PrincipalEntry that)) return false; + + return (principalClass.equals(that.principalClass) && principalName.equals(that.principalName)); + } + + /** + * Return a hashcode for this PrincipalEntry. + * + * @return a hashcode for this PrincipalEntry + */ + @Override + public int hashCode() { + return principalClass.hashCode(); + } + + public void write(PrintWriter out) { + out.print("principal " + getDisplayClass() + " " + getDisplayName(true)); + } + } + + /** + * Each permission entry in the policy configuration file is + * represented by a + * PermissionEntry object. + * + *

+     * For example, the entry
+     * <pre>

+     *          permission java.io.FilePermission "/tmp", "read,write";
+     * </pre>
+     * is represented internally
+     * <pre>
+     *
+     * pe = new PermissionEntry("java.io.FilePermission",
+     *                           "/tmp", "read,write");
+     * </pre>
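+     *
+     * The name and action parts are optional. As an illustration (these
+     * permissions are hypothetical examples, not taken from the original
+     * sources), both of the following also parse into PermissionEntry objects:
+     * <pre>
+     *          permission java.lang.RuntimePermission "createClassLoader";
+     *          permission java.security.AllPermission;
+     * </pre>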
+ * + * @author Roland Schemers + * + * version 1.19, 05/21/98 + */ + + public static class PermissionEntry { + + public String permission; + public String name; + public String action; + public String signedBy; + + public PermissionEntry() {} + + public PermissionEntry(String permission, String name, String action) { + this.permission = permission; + this.name = name; + this.action = action; + } + + /** + * Calculates a hash code value for the object. Objects + * which are equal will also have the same hashcode. + */ + @Override + public int hashCode() { + int retval = permission.hashCode(); + if (name != null) retval ^= name.hashCode(); + if (action != null) retval ^= action.hashCode(); + return retval; + } + + @Override + public boolean equals(Object obj) { + if (obj == this) return true; + + if (!(obj instanceof PermissionEntry that)) return false; + + if (this.permission == null) { + if (that.permission != null) return false; + } else { + if (!this.permission.equals(that.permission)) return false; + } + + if (this.name == null) { + if (that.name != null) return false; + } else { + if (!this.name.equals(that.name)) return false; + } + + if (this.action == null) { + if (that.action != null) return false; + } else { + if (!this.action.equals(that.action)) return false; + } + + if (this.signedBy == null) { + return that.signedBy == null; + } else { + return this.signedBy.equals(that.signedBy); + } + } + + public void write(PrintWriter out) { + out.print("permission "); + out.print(permission); + if (name != null) { + out.print(" \""); + + // ATTENTION: regex with double escaping, + // the normal forms look like: + // $name =~ s/\\/\\\\/g; and + // $name =~ s/\"/\\\"/g; + // and then in a java string, it's escaped again + + out.print(name.replaceAll("\\\\", "\\\\\\\\").replaceAll("\"", "\\\\\\\"")); + out.print('"'); + } + if (action != null) { + out.print(", \""); + out.print(action); + out.print('"'); + } + if (signedBy != null) { + out.print(", signedBy \""); + out.print(signedBy); + out.print('"'); + } + out.println(";"); + } + } + + /** + * Each domain entry in the keystore domain configuration file is + * represented by a DomainEntry object. + */ + static class DomainEntry { + private final String name; + private final Map properties; + private final Map entries; + + DomainEntry(String name, Map properties) { + this.name = name; + this.properties = properties; + entries = new HashMap<>(); + } + + String getName() { + return name; + } + + Map getProperties() { + return properties; + } + + Collection getEntries() { + return entries.values(); + } + + void add(KeyStoreEntry entry) throws ParsingException { + String keystoreName = entry.getName(); + if (!entries.containsKey(keystoreName)) { + entries.put(keystoreName, entry); + } else { + Object[] source = { keystoreName }; + String msg = "duplicate keystore name: " + keystoreName; + throw new ParsingException(msg, source); + } + } + + @Override + public String toString() { + StringBuilder s = new StringBuilder("\ndomain ").append(name); + + if (properties != null) { + for (Map.Entry property : properties.entrySet()) { + s.append("\n ").append(property.getKey()).append('=').append(property.getValue()); + } + } + s.append(" {\n"); + + for (KeyStoreEntry entry : entries.values()) { + s.append(entry).append("\n"); + } + s.append("}"); + + return s.toString(); + } + } + + /** + * Each keystore entry in the keystore domain configuration file is + * represented by a KeyStoreEntry object. 
+ */ + + static class KeyStoreEntry { + private final String name; + private final Map properties; + + KeyStoreEntry(String name, Map properties) { + this.name = name; + this.properties = properties; + } + + String getName() { + return name; + } + + Map getProperties() { + return properties; + } + + @Override + public String toString() { + StringBuilder s = new StringBuilder("\n keystore ").append(name); + if (properties != null) { + for (Map.Entry property : properties.entrySet()) { + s.append("\n ").append(property.getKey()).append('=').append(property.getValue()); + } + } + s.append(";"); + + return s.toString(); + } + } + + public static class ParsingException extends GeneralSecurityException { + + @java.io.Serial + private static final long serialVersionUID = -4330692689482574072L; + + @SuppressWarnings("serial") // Not statically typed as Serializable + private Object[] source; + + /** + * Constructs a ParsingException with the specified + * detail message. A detail message is a String that describes + * this particular exception, which may, for example, specify which + * algorithm is not available. + * + * @param msg the detail message. + */ + public ParsingException(String msg) { + super(msg); + } + + public ParsingException(String msg, Object[] source) { + super(msg); + this.source = source; + } + + public ParsingException(int line, String msg) { + super("line " + line + ": " + msg); + source = new Object[] { line, msg }; + } + + public ParsingException(int line, String expect, String actual) { + super("line " + line + ": expected [" + expect + "], found [" + actual + "]"); + source = new Object[] { line, expect, actual }; + } + } +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyUtil.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyUtil.java new file mode 100644 index 0000000000000..ed19379b697c0 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyUtil.java @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.secure_sm.policy; + +import java.io.BufferedInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.MalformedURLException; +import java.net.URL; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.NoSuchProviderException; +import java.util.Arrays; + +/** + * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/util/PolicyUtil.java + */ +public class PolicyUtil { + + // standard PKCS11 KeyStore type + private static final String P11KEYSTORE = "PKCS11"; + + // reserved word + private static final String NONE = "NONE"; + + /* + * Fast path reading from file urls in order to avoid calling + * FileURLConnection.connect() which can be quite slow the first time + * it is called. We really should clean up FileURLConnection so that + * this is not a problem but in the meantime this fix helps reduce + * start up time noticeably for the new launcher. -- DAC + */ + public static InputStream getInputStream(URL url) throws IOException { + if ("file".equals(url.getProtocol())) { + String path = url.getFile().replace('/', File.separatorChar); + path = ParseUtil.decode(path); + return new FileInputStream(path); + } else { + return url.openStream(); + } + } + + /** + * this is intended for use by the policy parser to + * instantiate a KeyStore from the information in the GUI/policy file + */ + public static KeyStore getKeyStore( + URL policyUrl, // URL of policy file + String keyStoreName, // input: keyStore URL + String keyStoreType, // input: keyStore type + String keyStoreProvider, // input: keyStore provider + String storePassURL // input: keyStore password + ) throws KeyStoreException, IOException, NoSuchProviderException, NoSuchAlgorithmException, java.security.cert.CertificateException { + + if (keyStoreName == null) { + throw new IllegalArgumentException("null KeyStore name"); + } + + char[] keyStorePassword = null; + try { + KeyStore ks; + if (keyStoreType == null) { + keyStoreType = KeyStore.getDefaultType(); + } + + if (P11KEYSTORE.equalsIgnoreCase(keyStoreType) && !NONE.equals(keyStoreName)) { + throw new IllegalArgumentException( + "Invalid value (" + + keyStoreName + + ") for keystore URL. 
If the keystore type is \"" + + P11KEYSTORE + + "\", the keystore url must be \"" + + NONE + + "\"" + ); + } + + if (keyStoreProvider != null) { + ks = KeyStore.getInstance(keyStoreType, keyStoreProvider); + } else { + ks = KeyStore.getInstance(keyStoreType); + } + + if (storePassURL != null) { + URL passURL; + try { + @SuppressWarnings("deprecation") + var _unused = passURL = new URL(storePassURL); + // absolute URL + } catch (MalformedURLException e) { + // relative URL + if (policyUrl == null) { + throw e; + } + @SuppressWarnings("deprecation") + var _unused = passURL = new URL(policyUrl, storePassURL); + } + + try (InputStream in = passURL.openStream()) { + keyStorePassword = Password.readPassword(in); + } + } + + if (NONE.equals(keyStoreName)) { + ks.load(null, keyStorePassword); + } else { + /* + * location of keystore is specified as absolute URL in policy + * file, or is relative to URL of policy file + */ + URL keyStoreUrl; + try { + @SuppressWarnings("deprecation") + var _unused = keyStoreUrl = new URL(keyStoreName); + // absolute URL + } catch (MalformedURLException e) { + // relative URL + if (policyUrl == null) { + throw e; + } + @SuppressWarnings("deprecation") + var _unused = keyStoreUrl = new URL(policyUrl, keyStoreName); + } + + try (InputStream inStream = new BufferedInputStream(getInputStream(keyStoreUrl))) { + ks.load(inStream, keyStorePassword); + } + } + return ks; + } finally { + if (keyStorePassword != null) { + Arrays.fill(keyStorePassword, ' '); + } + } + } +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java new file mode 100644 index 0000000000000..759822b0ef2b5 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.secure_sm.policy; + +import java.net.URI; +import java.net.URISyntaxException; +import java.security.GeneralSecurityException; + +/** + * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/util/PropertyExpander.java + */ +public class PropertyExpander { + + public static class ExpandException extends GeneralSecurityException { + private static final long serialVersionUID = -1L; + + public ExpandException(String msg) { + super(msg); + } + } + + public static String expand(String value) throws ExpandException { + return expand(value, false); + } + + public static String expand(String value, boolean encodeURL) throws ExpandException { + if (value == null) return null; + + int p = value.indexOf("${"); + + // no special characters + if (p == -1) return value; + + StringBuilder sb = new StringBuilder(value.length()); + int max = value.length(); + int i = 0; // index of last character we copied + + scanner: while (p < max) { + if (p > i) { + // copy in anything before the special stuff + sb.append(value.substring(i, p)); + } + int pe = p + 2; + + // do not expand ${{ ... }} + if (pe < max && value.charAt(pe) == '{') { + pe = value.indexOf("}}", pe); + if (pe == -1 || pe + 2 == max) { + // append remaining chars + sb.append(value.substring(p)); + break scanner; + } else { + // append as normal text + pe++; + sb.append(value.substring(p, pe + 1)); + } + } else { + while ((pe < max) && (value.charAt(pe) != '}')) { + pe++; + } + if (pe == max) { + // no matching '}' found, just add in as normal text + sb.append(value.substring(p, pe)); + break scanner; + } + String prop = value.substring(p + 2, pe); + if (prop.equals("/")) { + sb.append(java.io.File.separatorChar); + } else { + String val = System.getProperty(prop); + if (val != null) { + if (encodeURL) { + // encode 'val' unless it's an absolute URI + // at the beginning of the string buffer + try { + if (sb.length() > 0 || !(new URI(val)).isAbsolute()) { + val = ParseUtil.encodePath(val); + } + } catch (URISyntaxException use) { + val = ParseUtil.encodePath(val); + } + } + sb.append(val); + } else { + throw new ExpandException("unable to expand property " + prop); + } + } + } + i = pe + 1; + p = value.indexOf("${", i); + if (p == -1) { + // no more to expand. copy in any extra + if (i < max) { + sb.append(value.substring(i, max)); + } + // break out of loop + break scanner; + } + } + return sb.toString(); + } +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/SecurityConstants.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/SecurityConstants.java new file mode 100644 index 0000000000000..39e8efd87868c --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/SecurityConstants.java @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm.policy; + +import java.lang.reflect.ReflectPermission; +import java.net.NetPermission; +import java.net.SocketPermission; +import java.security.AllPermission; +import java.security.SecurityPermission; + +/** + * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/util/SecurityConstants.java + */ +public final class SecurityConstants { + // Cannot create one of these + private SecurityConstants() {} + + // Commonly used string constants for permission actions used by + // SecurityManager. Declare here for shortcut when checking permissions + // in FilePermission, SocketPermission, and PropertyPermission. + + public static final String FILE_DELETE_ACTION = "delete"; + public static final String FILE_EXECUTE_ACTION = "execute"; + public static final String FILE_READ_ACTION = "read"; + public static final String FILE_WRITE_ACTION = "write"; + public static final String FILE_READLINK_ACTION = "readlink"; + + public static final String SOCKET_RESOLVE_ACTION = "resolve"; + public static final String SOCKET_CONNECT_ACTION = "connect"; + public static final String SOCKET_LISTEN_ACTION = "listen"; + public static final String SOCKET_ACCEPT_ACTION = "accept"; + public static final String SOCKET_CONNECT_ACCEPT_ACTION = "connect,accept"; + + public static final String PROPERTY_RW_ACTION = "read,write"; + public static final String PROPERTY_READ_ACTION = "read"; + public static final String PROPERTY_WRITE_ACTION = "write"; + + // Permission constants used in the various checkPermission() calls in JDK. 
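+    // For illustration only (hypothetical usage): at a check site these action
+    // strings are paired with a concrete permission, e.g.
+    //   new java.util.PropertyPermission("user.dir", PROPERTY_READ_ACTION)
+    //   new java.net.SocketPermission("example.org:443", SOCKET_CONNECT_ACTION)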
+ + // java.lang.Class, java.lang.SecurityManager, java.lang.System, + // java.net.URLConnection, java.security.AllPermission, java.security.Policy, + // sun.security.provider.PolicyFile + public static final AllPermission ALL_PERMISSION = new AllPermission(); + + // java.net.URL + public static final NetPermission SPECIFY_HANDLER_PERMISSION = new NetPermission("specifyStreamHandler"); + + // java.net.ProxySelector + public static final NetPermission SET_PROXYSELECTOR_PERMISSION = new NetPermission("setProxySelector"); + + // java.net.ProxySelector + public static final NetPermission GET_PROXYSELECTOR_PERMISSION = new NetPermission("getProxySelector"); + + // java.net.CookieHandler + public static final NetPermission SET_COOKIEHANDLER_PERMISSION = new NetPermission("setCookieHandler"); + + // java.net.CookieHandler + public static final NetPermission GET_COOKIEHANDLER_PERMISSION = new NetPermission("getCookieHandler"); + + // java.net.ResponseCache + public static final NetPermission SET_RESPONSECACHE_PERMISSION = new NetPermission("setResponseCache"); + + // java.net.ResponseCache + public static final NetPermission GET_RESPONSECACHE_PERMISSION = new NetPermission("getResponseCache"); + + // java.net.ServerSocket, java.net.Socket + public static final NetPermission SET_SOCKETIMPL_PERMISSION = new NetPermission("setSocketImpl"); + + // java.lang.SecurityManager, sun.applet.AppletPanel + public static final RuntimePermission CREATE_CLASSLOADER_PERMISSION = new RuntimePermission("createClassLoader"); + + // java.lang.SecurityManager + public static final RuntimePermission CHECK_MEMBER_ACCESS_PERMISSION = new RuntimePermission("accessDeclaredMembers"); + + // java.lang.SecurityManager, sun.applet.AppletSecurity + public static final RuntimePermission MODIFY_THREAD_PERMISSION = new RuntimePermission("modifyThread"); + + // java.lang.SecurityManager, sun.applet.AppletSecurity + public static final RuntimePermission MODIFY_THREADGROUP_PERMISSION = new RuntimePermission("modifyThreadGroup"); + + // java.lang.Class + public static final RuntimePermission GET_PD_PERMISSION = new RuntimePermission("getProtectionDomain"); + + // java.lang.Class, java.lang.ClassLoader, java.lang.Thread + public static final RuntimePermission GET_CLASSLOADER_PERMISSION = new RuntimePermission("getClassLoader"); + + // java.lang.Thread + public static final RuntimePermission STOP_THREAD_PERMISSION = new RuntimePermission("stopThread"); + + // java.lang.Thread + public static final RuntimePermission GET_STACK_TRACE_PERMISSION = new RuntimePermission("getStackTrace"); + + // java.lang.Thread + public static final RuntimePermission SUBCLASS_IMPLEMENTATION_PERMISSION = new RuntimePermission("enableContextClassLoaderOverride"); + + // java.security.AccessControlContext + public static final SecurityPermission CREATE_ACC_PERMISSION = new SecurityPermission("createAccessControlContext"); + + // java.security.AccessControlContext + public static final SecurityPermission GET_COMBINER_PERMISSION = new SecurityPermission("getDomainCombiner"); + + // java.security.Policy, java.security.ProtectionDomain + public static final SecurityPermission GET_POLICY_PERMISSION = new SecurityPermission("getPolicy"); + + // java.lang.SecurityManager + public static final SocketPermission LOCAL_LISTEN_PERMISSION = new SocketPermission("localhost:0", SOCKET_LISTEN_ACTION); + + // java.lang.reflect.AccessibleObject + public static final ReflectPermission ACCESS_PERMISSION = new ReflectPermission("suppressAccessChecks"); + + // 
sun.reflect.ReflectionFactory + public static final RuntimePermission REFLECTION_FACTORY_ACCESS_PERMISSION = new RuntimePermission("reflectionFactoryAccess"); + +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java new file mode 100644 index 0000000000000..d182490b8d173 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Java Agent Policy + */ +package org.opensearch.secure_sm.policy; diff --git a/libs/agent-sm/bootstrap/build.gradle b/libs/agent-sm/bootstrap/build.gradle new file mode 100644 index 0000000000000..1757e3cd75c99 --- /dev/null +++ b/libs/agent-sm/bootstrap/build.gradle @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +apply plugin: 'opensearch.build' +apply plugin: 'opensearch.publish' + +base { + archivesName = 'opensearch-agent-bootstrap' +} + +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' +} + +test.enabled = false +testingConventions.enabled = false diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java new file mode 100644 index 0000000000000..d2c77fac011b5 --- /dev/null +++ b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.javaagent.bootstrap; + +import java.lang.StackWalker.Option; +import java.lang.StackWalker.StackFrame; +import java.security.Permission; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.logging.Logger; +import java.util.stream.Collectors; + +/** + * Agent Policy + */ +@SuppressWarnings("removal") +public class AgentPolicy { + private static final Logger LOGGER = Logger.getLogger(AgentPolicy.class.getName()); + private static volatile Policy policy; + private static volatile Set trustedHosts; + + private AgentPolicy() {} + + /** + * Set Agent policy + * @param policy policy + */ + public static void setPolicy(Policy policy) { + setPolicy(policy, Set.of()); + } + + /** + * Set Agent policy + * @param policy policy + * @param trustedHosts trusted hosts + */ + public static void setPolicy(Policy policy, final Set trustedHosts) { + if (AgentPolicy.policy == null) { + AgentPolicy.policy = policy; + AgentPolicy.trustedHosts = Collections.unmodifiableSet(trustedHosts); + LOGGER.info("Policy attached successfully: " + policy); + } else { + throw new SecurityException("The Policy has been set already: " + AgentPolicy.policy); + } + } + + /** + * Check permissions + * @param permission permission + */ + public static void checkPermission(Permission permission) { + final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); + final List callers = walker.walk( + frames -> frames.map(StackFrame::getDeclaringClass).map(Class::getProtectionDomain).distinct().collect(Collectors.toList()) + ); + + for (final ProtectionDomain domain : callers) { + if (!policy.implies(domain, permission)) { + throw new SecurityException("Denied access: " + permission); + } + } + } + + /** + * Get policy + * @return policy + */ + public static Policy getPolicy() { + return policy; + } + + /** + * Check if hostname is trusted + * @param hostname hostname + * @return is trusted or not + */ + public static boolean isTrustedHost(String hostname) { + return AgentPolicy.trustedHosts.contains(hostname); + } +} diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java new file mode 100644 index 0000000000000..6172ae511a8f7 --- /dev/null +++ b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Java Agent Policy + */ +package org.opensearch.javaagent.bootstrap; diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java new file mode 100644 index 0000000000000..8fb377151ae39 --- /dev/null +++ b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Java Agent Policy Bootstrap + */ +package org.opensearch; diff --git a/libs/agent-sm/build.gradle b/libs/agent-sm/build.gradle new file mode 100644 index 0000000000000..656411a08080f --- /dev/null +++ b/libs/agent-sm/build.gradle @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory + +base { + archivesName = 'opensearch-agent-sm' +} + +test.enabled = false +testingConventions.enabled = false From fcb0649b14536889a6f4c28197cf485c33dfff5e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 16:26:08 -0400 Subject: [PATCH 110/550] Bump com.google.code.gson:gson from 2.11.0 to 2.12.1 in /plugins/repository-gcs (#17668) * Bump com.google.code.gson:gson in /plugins/repository-gcs Bumps [com.google.code.gson:gson](https://github.com/google/gson) from 2.11.0 to 2.12.1. - [Release notes](https://github.com/google/gson/releases) - [Changelog](https://github.com/google/gson/blob/main/CHANGELOG.md) - [Commits](https://github.com/google/gson/compare/gson-parent-2.11.0...gson-parent-2.12.1) --- updated-dependencies: - dependency-name: com.google.code.gson:gson dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Craig Perkins Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Craig Perkins --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 2 +- plugins/repository-gcs/licenses/gson-2.11.0.jar.sha1 | 1 - plugins/repository-gcs/licenses/gson-2.12.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/gson-2.11.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gson-2.12.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index b9e2426816006..27c39896ab8c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.api:gax` from 2.35.0 to 2.63.1 ([#17465](https://github.com/opensearch-project/OpenSearch/pull/17465)) - Bump `com.azure:azure-storage-blob` from 12.29.1 to 12.30.0 ([#17667](https://github.com/opensearch-project/OpenSearch/pull/17667)) - Bump `tj-actions/changed-files` from 46.0.1 to 46.0.3 ([#17666](https://github.com/opensearch-project/OpenSearch/pull/17666)) +- Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17668](https://github.com/opensearch-project/OpenSearch/pull/17668)) ### Changed diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 515ad4c3bc0ed..cae9a29dd341e 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -66,7 +66,7 @@ dependencies { api 'com.google.cloud:google-cloud-core-http:2.47.0' api 'com.google.cloud:google-cloud-storage:1.113.1' - api 'com.google.code.gson:gson:2.11.0' 
+ api 'com.google.code.gson:gson:2.12.1' runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' diff --git a/plugins/repository-gcs/licenses/gson-2.11.0.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.11.0.jar.sha1 deleted file mode 100644 index 0414a49526895..0000000000000 --- a/plugins/repository-gcs/licenses/gson-2.11.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -527175ca6d81050b53bdd4c457a6d6e017626b0e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gson-2.12.1.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.12.1.jar.sha1 new file mode 100644 index 0000000000000..7d57e885daa08 --- /dev/null +++ b/plugins/repository-gcs/licenses/gson-2.12.1.jar.sha1 @@ -0,0 +1 @@ +4e773a317740b83b43cfc3d652962856041697cb \ No newline at end of file From 89a6f4e22994e545c3d4abbd29e579e2991bbd11 Mon Sep 17 00:00:00 2001 From: Sayali Gaikawad Date: Tue, 25 Mar 2025 14:19:03 -0700 Subject: [PATCH 111/550] Authenticate jenkins calls for benchmark workflow (#17688) Signed-off-by: Sayali Gaikawad --- .github/workflows/benchmark-pull-request.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index b1884c6156f08..1459cb06b03b6 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -167,7 +167,7 @@ jobs: - name: Trigger jenkins workflow to run gradle check run: | cat $GITHUB_ENV - bash opensearch-build/scripts/benchmark/benchmark-pull-request.sh ${{ secrets.JENKINS_PR_BENCHMARK_GENERIC_WEBHOOK_TOKEN }} + bash opensearch-build/scripts/benchmark/benchmark-pull-request.sh -t ${{ secrets.JENKINS_PR_BENCHMARK_GENERIC_WEBHOOK_TOKEN }} -u ${{ secrets.JENKINS_GITHUB_USER}} -p ${{ secrets.JENKINS_GITHUB_USER_TOKEN}} - name: Update PR with Job Url uses: actions/github-script@v7 with: From dab453b9d94b0cfb1dbd333cabe5bcb3d484a2f4 Mon Sep 17 00:00:00 2001 From: expani1729 <110471048+expani@users.noreply.github.com> Date: Tue, 25 Mar 2025 16:09:52 -0700 Subject: [PATCH 112/550] TimeSeries Desc Sort gets skipped with Lucene 10 upgrade (#17329) Ensuring time series desc sort optimisation invokes searchLeaf on CtxIdxSearcher --------- Signed-off-by: expani --- .../search/internal/ContextIndexSearcher.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index 2f0d2399aa640..3215759b59259 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -44,6 +44,7 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.ConjunctionUtils; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; @@ -257,8 +258,15 @@ public void search( @Override public void search(Query query, Collector collector) throws IOException { - super.search(query, collector); - searchContext.bucketCollectorProcessor().processPostCollection(collector); + // TODO : Remove when switching to use the @org.apache.lucene.search.IndexSearcher#search(Query, CollectorManager) variant from + // 
@org.opensearch.search.query.QueryPhase#searchWithCollector which then calls the overridden + // search(LeafReaderContextPartition[] partitions, Weight weight, Collector collector) + query = collector.scoreMode().needsScores() ? rewrite(query) : rewrite(new ConstantScoreQuery(query)); + Weight weight = createWeight(query, collector.scoreMode(), 1); + LeafReaderContextPartition[] partitions = (getLeafContexts() == null) + ? new LeafReaderContextPartition[0] + : getLeafContexts().stream().map(LeafReaderContextPartition::createForEntireSegment).toArray(LeafReaderContextPartition[]::new); + search(partitions, weight, collector); } public void search( @@ -297,6 +305,7 @@ protected void search(LeafReaderContextPartition[] partitions, Weight weight, Co searchLeaf(partition.ctx, partition.minDocId, partition.maxDocId, weight, collector); } } + // TODO : Make this a responsibility for the callers rather than implicitly getting it done here ? searchContext.bucketCollectorProcessor().processPostCollection(collector); } catch (Throwable t) { searchContext.indexShard().getSearchOperationListener().onFailedSliceExecution(searchContext); From 3fb09c770a4359a90d0cc7e88b64c898101fd6c0 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Tue, 25 Mar 2025 16:45:42 -0700 Subject: [PATCH 113/550] Add simple PolicyParser unit test (#17690) Signed-off-by: Andrew Ross --- libs/agent-sm/agent-policy/build.gradle | 5 +- .../secure_sm/policy/PolicyParserTests.java | 53 +++++++++++++++++++ 2 files changed, 56 insertions(+), 2 deletions(-) create mode 100644 libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java diff --git a/libs/agent-sm/agent-policy/build.gradle b/libs/agent-sm/agent-policy/build.gradle index 997ed5ddf174b..a44c2c1349909 100644 --- a/libs/agent-sm/agent-policy/build.gradle +++ b/libs/agent-sm/agent-policy/build.gradle @@ -22,5 +22,6 @@ base { disableTasks('forbiddenApisMain') -test.enabled = false -testingConventions.enabled = false +dependencies { + testImplementation(project(":test:framework")) +} diff --git a/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java b/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java new file mode 100644 index 0000000000000..098e4c2605a95 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.secure_sm.policy; + +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.io.Reader; +import java.io.StringReader; +import java.util.Enumeration; + +public class PolicyParserTests extends OpenSearchTestCase { + private static final String POLICY = """ + grant codeBase "TestCodeBase" { + permission java.net.NetPermission "accessUnixDomainSocket"; + }; + + grant { + permission java.net.NetPermission "accessUnixDomainSocket"; + permission java.net.SocketPermission "*", "accept,connect"; + }; + """; + + public void testPolicy() throws IOException, PolicyParser.ParsingException { + try (Reader reader = new StringReader(POLICY)) { + final PolicyParser policyParser = new PolicyParser(); + policyParser.read(reader); + + final Enumeration grantEntryEnumeration = policyParser.grantElements(); + final PolicyParser.GrantEntry grantEntry1 = grantEntryEnumeration.nextElement(); + final PolicyParser.GrantEntry grantEntry2 = grantEntryEnumeration.nextElement(); + + assertEquals("TestCodeBase", grantEntry1.codeBase); + assertEquals(1, grantEntry1.permissionEntries.size()); + assertEquals("java.net.NetPermission", grantEntry1.permissionEntries.getFirst().permission); + assertEquals("accessUnixDomainSocket", grantEntry1.permissionEntries.getFirst().name); + + assertNull(grantEntry2.codeBase); + assertEquals(2, grantEntry2.permissionEntries.size()); + assertEquals("java.net.NetPermission", grantEntry2.permissionEntries.getFirst().permission); + assertEquals("accessUnixDomainSocket", grantEntry2.permissionEntries.getFirst().name); + assertEquals("java.net.SocketPermission", grantEntry2.permissionEntries.getLast().permission); + assertEquals("*", grantEntry2.permissionEntries.getLast().name); + assertEquals("accept,connect", grantEntry2.permissionEntries.getLast().action); + } + } +} From e50baefb8f962e9a470338b81ab31f256baa05c3 Mon Sep 17 00:00:00 2001 From: Sayali Gaikawad Date: Wed, 26 Mar 2025 11:16:36 -0700 Subject: [PATCH 114/550] Authenticate jenkins calls for gradle check (#17691) Signed-off-by: Sayali Gaikawad --- .github/workflows/gradle-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index e3b5739237fdc..99e2c6a8965c5 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -103,7 +103,7 @@ jobs: run: | set -e set -o pipefail - bash opensearch-build/scripts/gradle/gradle-check.sh ${{ secrets.JENKINS_GRADLE_CHECK_GENERIC_WEBHOOK_TOKEN }} | tee -a gradle-check.log + bash opensearch-build/scripts/gradle/gradle-check.sh -t ${{ secrets.JENKINS_GRADLE_CHECK_GENERIC_WEBHOOK_TOKEN }} -u ${{ secrets.JENKINS_GITHUB_USER}} -p ${{ secrets.JENKINS_GITHUB_USER_TOKEN}} | tee -a gradle-check.log - name: Setup Result Status if: always() From d0667433ae3a906fafb18f6c4c4f68e9f08cdbb8 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Wed, 26 Mar 2025 16:06:07 -0700 Subject: [PATCH 115/550] Fix MissingValues for Lucene 10 (#17647) With the Lucene 10 upgrade, we should not rely on doc values returning NO_MORE_DOCS once they've run out of ords. Instead, we should return a correct value for docValueCount(). 
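For context, a consumer-side sketch of that contract (illustrative only, not part of this patch; the OrdinalsByCount helper name is hypothetical): with Lucene 10, callers position a SortedSetDocValues iterator with advanceExact and then call nextOrd() exactly docValueCount() times, instead of looping until a sentinel ordinal is returned.

    import java.io.IOException;

    import org.apache.lucene.index.SortedSetDocValues;

    final class OrdinalsByCount {
        // Reads all ordinals of one document under the Lucene 10 doc values contract.
        static long[] readOrds(SortedSetDocValues values, int docId) throws IOException {
            if (values.advanceExact(docId) == false) {
                return new long[0]; // document has no values for this field
            }
            final int count = values.docValueCount(); // authoritative per-document ordinal count
            final long[] ords = new long[count];
            for (int i = 0; i < count; i++) {
                ords[i] = values.nextOrd(); // never call nextOrd() more than `count` times
            }
            return ords;
        }
    }
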
Signed-off-by: Michael Froh --- .../search/aggregations/support/MissingValues.java | 6 ++++-- .../search/aggregations/support/MissingValuesTests.java | 6 +++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java index 429a543281c76..166334292d438 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java @@ -324,7 +324,8 @@ public boolean advanceExact(int doc) throws IOException { @Override public int docValueCount() { - return values.docValueCount(); + // If we don't have ordinals, then we just have the missing value + return hasOrds ? values.docValueCount() : 1; } @Override @@ -359,7 +360,8 @@ public long getValueCount() { @Override public int docValueCount() { - return Math.max(1, values.docValueCount()); + // If we don't have ordinals, then we just have the missing value + return hasOrds ? values.docValueCount() : 1; } @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java index a0a280c34579e..b92d912c0f5f5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java @@ -152,7 +152,7 @@ public long nextOrd() { if (i < ords[doc].length) { return ords[doc][i++]; } else { - return NO_MORE_DOCS; + throw new IllegalStateException(); } } @@ -175,13 +175,13 @@ public int docValueCount() { for (int i = 0; i < numDocs; ++i) { assertTrue(withMissingReplaced.advanceExact(i)); if (ords[i].length > 0) { + assertEquals(ords[i].length, withMissingReplaced.docValueCount()); for (int ord : ords[i]) { assertEquals(values[ord], withMissingReplaced.lookupOrd(withMissingReplaced.nextOrd())); } - assertEquals(SortedSetDocValues.NO_MORE_DOCS, withMissingReplaced.nextOrd()); } else { + assertEquals(1, withMissingReplaced.docValueCount()); assertEquals(missing, withMissingReplaced.lookupOrd(withMissingReplaced.nextOrd())); - assertEquals(SortedSetDocValues.NO_MORE_DOCS, withMissingReplaced.nextOrd()); } } } From 399188f3034936f237bd9d43e40cf7528d8f5c40 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 27 Mar 2025 18:18:36 -0700 Subject: [PATCH 116/550] Revert policy parsing logic (#17703) * Revert "Add simple PolicyParser unit test (#17690)" This reverts commit 3fb09c770a4359a90d0cc7e88b64c898101fd6c0. Signed-off-by: Andrew Ross * Revert "Add support of Java policies (#17663)" This reverts commit 17289b7685e72425e13051b1169518cdcafc4600. 
Signed-off-by: Andrew Ross --------- Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 - gradle/missing-javadoc.gradle | 1 - libs/agent-sm/agent-policy/build.gradle | 27 - .../java/org/opensearch/package-info.java | 12 - .../secure_sm/policy/ParseUtil.java | 616 ------- .../opensearch/secure_sm/policy/Password.java | 173 -- .../secure_sm/policy/PolicyFile.java | 1601 ----------------- .../secure_sm/policy/PolicyParser.java | 1163 ------------ .../secure_sm/policy/PolicyUtil.java | 170 -- .../secure_sm/policy/PropertyExpander.java | 133 -- .../secure_sm/policy/SecurityConstants.java | 145 -- .../secure_sm/policy/package-info.java | 12 - .../secure_sm/policy/PolicyParserTests.java | 53 - libs/agent-sm/bootstrap/build.gradle | 24 - .../javaagent/bootstrap/AgentPolicy.java | 89 - .../javaagent/bootstrap/package-info.java | 12 - .../java/org/opensearch/package-info.java | 12 - libs/agent-sm/build.gradle | 22 - 18 files changed, 4266 deletions(-) delete mode 100644 libs/agent-sm/agent-policy/build.gradle delete mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/package-info.java delete mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/ParseUtil.java delete mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Password.java delete mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java delete mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java delete mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyUtil.java delete mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java delete mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/SecurityConstants.java delete mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java delete mode 100644 libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java delete mode 100644 libs/agent-sm/bootstrap/build.gradle delete mode 100644 libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java delete mode 100644 libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java delete mode 100644 libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java delete mode 100644 libs/agent-sm/build.gradle diff --git a/CHANGELOG.md b/CHANGELOG.md index 27c39896ab8c5..c96ddf75e3282 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a new search node role to hold search only shards ([#17620](https://github.com/opensearch-project/OpenSearch/pull/17620)) - Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) -- [Security Manager Replacement] Add support of Java policies ([#17663](https://github.com/opensearch-project/OpenSearch/pull/17663)) - Added Kinesis support as a plugin for the pull-based ingestion ([#17615](https://github.com/opensearch-project/OpenSearch/pull/17615) ### Changed diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 9f27dc5cadcd2..6e31f838e678a 100644 --- 
a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -106,7 +106,6 @@ configure([ project(":libs:opensearch-secure-sm"), project(":libs:opensearch-ssl-config"), project(":libs:opensearch-x-content"), - project(":libs:agent-sm:agent-policy"), project(":modules:aggs-matrix-stats"), project(":modules:analysis-common"), project(":modules:geo"), diff --git a/libs/agent-sm/agent-policy/build.gradle b/libs/agent-sm/agent-policy/build.gradle deleted file mode 100644 index a44c2c1349909..0000000000000 --- a/libs/agent-sm/agent-policy/build.gradle +++ /dev/null @@ -1,27 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -apply plugin: 'opensearch.build' -apply plugin: 'opensearch.publish' - -ext { - failOnJavadocWarning = false -} - -base { - archivesName = 'opensearch-agent-policy' -} - -disableTasks('forbiddenApisMain') - -dependencies { - testImplementation(project(":test:framework")) -} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/package-info.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/package-info.java deleted file mode 100644 index 0724b60d1777f..0000000000000 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Java Agent Policy - */ -package org.opensearch; diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/ParseUtil.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/ParseUtil.java deleted file mode 100644 index d4477fa13fdcd..0000000000000 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/ParseUtil.java +++ /dev/null @@ -1,616 +0,0 @@ -/* - * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.secure_sm.policy; - -import java.io.File; -import java.net.MalformedURLException; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URL; -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.CharacterCodingException; -import java.nio.charset.CharsetDecoder; -import java.nio.charset.CharsetEncoder; -import java.nio.charset.CoderResult; -import java.nio.charset.CodingErrorAction; -import java.nio.charset.StandardCharsets; -import java.util.HexFormat; - -/** - * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/net/www/ParseUtil.java - */ -public final class ParseUtil { - - private static final HexFormat HEX_UPPERCASE = HexFormat.of().withUpperCase(); - - private ParseUtil() {} - - /** - * Constructs an encoded version of the specified path string suitable - * for use in the construction of a URL. - * - * A path separator is replaced by a forward slash. The string is UTF8 - * encoded. The % escape sequence is used for characters that are above - * 0x7F or those defined in RFC2396 as reserved or excluded in the path - * component of a URL. - */ - public static String encodePath(String path) { - return encodePath(path, true); - } - - /* - * flag indicates whether path uses platform dependent - * File.separatorChar or not. True indicates path uses platform - * dependent File.separatorChar. - */ - public static String encodePath(String path, boolean flag) { - if (flag && File.separatorChar != '/') { - return encodePath(path, 0, File.separatorChar); - } else { - int index = firstEncodeIndex(path); - if (index > -1) { - return encodePath(path, index, '/'); - } else { - return path; - } - } - } - - private static int firstEncodeIndex(String path) { - int len = path.length(); - for (int i = 0; i < len; i++) { - char c = path.charAt(i); - // Ordering in the following test is performance sensitive, - // and typically paths have most chars in the a-z range, then - // in the symbol range '&'-':' (includes '.', '/' and '0'-'9') - // and more rarely in the A-Z range. 
- if (c >= 'a' && c <= 'z' || c >= '&' && c <= ':' || c >= 'A' && c <= 'Z') { - continue; - } else if (c > 0x007F || match(c, L_ENCODED, H_ENCODED)) { - return i; - } - } - return -1; - } - - private static String encodePath(String path, int index, char sep) { - char[] pathCC = path.toCharArray(); - char[] retCC = new char[pathCC.length * 2 + 16 - index]; - if (index > 0) { - System.arraycopy(pathCC, 0, retCC, 0, index); - } - int retLen = index; - - for (int i = index; i < pathCC.length; i++) { - char c = pathCC[i]; - if (c == sep) retCC[retLen++] = '/'; - else { - if (c <= 0x007F) { - if (c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9') { - retCC[retLen++] = c; - } else if (match(c, L_ENCODED, H_ENCODED)) { - retLen = escape(retCC, c, retLen); - } else { - retCC[retLen++] = c; - } - } else if (c > 0x07FF) { - retLen = escape(retCC, (char) (0xE0 | ((c >> 12) & 0x0F)), retLen); - retLen = escape(retCC, (char) (0x80 | ((c >> 6) & 0x3F)), retLen); - retLen = escape(retCC, (char) (0x80 | ((c >> 0) & 0x3F)), retLen); - } else { - retLen = escape(retCC, (char) (0xC0 | ((c >> 6) & 0x1F)), retLen); - retLen = escape(retCC, (char) (0x80 | ((c >> 0) & 0x3F)), retLen); - } - } - // worst case scenario for character [0x7ff-] every single - // character will be encoded into 9 characters. - if (retLen + 9 > retCC.length) { - int newLen = retCC.length * 2 + 16; - if (newLen < 0) { - newLen = Integer.MAX_VALUE; - } - char[] buf = new char[newLen]; - System.arraycopy(retCC, 0, buf, 0, retLen); - retCC = buf; - } - } - return new String(retCC, 0, retLen); - } - - /** - * Appends the URL escape sequence for the specified char to the - * specified character array. - */ - private static int escape(char[] cc, char c, int index) { - cc[index++] = '%'; - cc[index++] = Character.forDigit((c >> 4) & 0xF, 16); - cc[index++] = Character.forDigit(c & 0xF, 16); - return index; - } - - /** - * Un-escape and return the character at position i in string s. - */ - private static byte unescape(String s, int i) { - return (byte) Integer.parseInt(s, i + 1, i + 3, 16); - } - - /** - * Returns a new String constructed from the specified String by replacing - * the URL escape sequences and UTF8 encoding with the characters they - * represent. 
- */ - public static String decode(String s) { - int n = s.length(); - if ((n == 0) || (s.indexOf('%') < 0)) return s; - - StringBuilder sb = new StringBuilder(n); - ByteBuffer bb = ByteBuffer.allocate(n); - CharBuffer cb = CharBuffer.allocate(n); - CharsetDecoder dec = StandardCharsets.UTF_8.newDecoder() - .onMalformedInput(CodingErrorAction.REPORT) - .onUnmappableCharacter(CodingErrorAction.REPORT); - - char c = s.charAt(0); - for (int i = 0; i < n;) { - assert c == s.charAt(i); - if (c != '%') { - sb.append(c); - if (++i >= n) break; - c = s.charAt(i); - continue; - } - bb.clear(); - for (;;) { - if (n - i < 2) { - throw new IllegalArgumentException("Malformed escape pair: " + s); - } - - try { - bb.put(unescape(s, i)); - } catch (NumberFormatException | IndexOutOfBoundsException e) { - throw new IllegalArgumentException("Malformed escape pair: " + s); - } - i += 3; - if (i >= n) break; - c = s.charAt(i); - if (c != '%') break; - } - bb.flip(); - cb.clear(); - dec.reset(); - CoderResult cr = dec.decode(bb, cb, true); - if (cr.isError()) throw new IllegalArgumentException("Error decoding percent encoded characters"); - cr = dec.flush(cb); - if (cr.isError()) throw new IllegalArgumentException("Error decoding percent encoded characters"); - sb.append(cb.flip().toString()); - } - - return sb.toString(); - } - - public static URL fileToEncodedURL(File file) throws MalformedURLException { - String path = file.getAbsolutePath(); - path = ParseUtil.encodePath(path); - if (!path.startsWith("/")) { - path = "/" + path; - } - if (!path.endsWith("/") && file.isDirectory()) { - path = path + "/"; - } - @SuppressWarnings("deprecation") - var result = new URL("file", "", path); - return result; - } - - public static java.net.URI toURI(URL url) { - String protocol = url.getProtocol(); - String auth = url.getAuthority(); - String path = url.getPath(); - String query = url.getQuery(); - String ref = url.getRef(); - if (path != null && !(path.startsWith("/"))) path = "/" + path; - - // - // In java.net.URI class, a port number of -1 implies the default - // port number. So get it stripped off before creating URI instance. - // - if (auth != null && auth.endsWith(":-1")) auth = auth.substring(0, auth.length() - 3); - - java.net.URI uri; - try { - uri = createURI(protocol, auth, path, query, ref); - } catch (java.net.URISyntaxException e) { - uri = null; - } - return uri; - } - - // - // createURI() and its auxiliary code are cloned from java.net.URI. - // Most of the code are just copy and paste, except that quote() - // has been modified to avoid double-escape. - // - // Usually it is unacceptable, but we're forced to do it because - // otherwise we need to change public API, namely java.net.URI's - // multi-argument constructors. It turns out that the changes cause - // incompatibilities so can't be done. 
- // - private static URI createURI(String scheme, String authority, String path, String query, String fragment) throws URISyntaxException { - String s = toString(scheme, null, authority, null, null, -1, path, query, fragment); - checkPath(s, scheme, path); - return new URI(s); - } - - private static String toString( - String scheme, - String opaquePart, - String authority, - String userInfo, - String host, - int port, - String path, - String query, - String fragment - ) { - StringBuilder sb = new StringBuilder(); - if (scheme != null) { - sb.append(scheme); - sb.append(':'); - } - appendSchemeSpecificPart(sb, opaquePart, authority, userInfo, host, port, path, query); - appendFragment(sb, fragment); - return sb.toString(); - } - - private static void appendSchemeSpecificPart( - StringBuilder sb, - String opaquePart, - String authority, - String userInfo, - String host, - int port, - String path, - String query - ) { - if (opaquePart != null) { - /* check if SSP begins with an IPv6 address - * because we must not quote a literal IPv6 address - */ - if (opaquePart.startsWith("//[")) { - int end = opaquePart.indexOf(']'); - if (end != -1 && opaquePart.indexOf(':') != -1) { - String doquote, dontquote; - if (end == opaquePart.length()) { - dontquote = opaquePart; - doquote = ""; - } else { - dontquote = opaquePart.substring(0, end + 1); - doquote = opaquePart.substring(end + 1); - } - sb.append(dontquote); - sb.append(quote(doquote, L_URIC, H_URIC)); - } - } else { - sb.append(quote(opaquePart, L_URIC, H_URIC)); - } - } else { - appendAuthority(sb, authority, userInfo, host, port); - if (path != null) sb.append(quote(path, L_PATH, H_PATH)); - if (query != null) { - sb.append('?'); - sb.append(quote(query, L_URIC, H_URIC)); - } - } - } - - private static void appendAuthority(StringBuilder sb, String authority, String userInfo, String host, int port) { - if (host != null) { - sb.append("//"); - if (userInfo != null) { - sb.append(quote(userInfo, L_USERINFO, H_USERINFO)); - sb.append('@'); - } - boolean needBrackets = ((host.indexOf(':') >= 0) && !host.startsWith("[") && !host.endsWith("]")); - if (needBrackets) sb.append('['); - sb.append(host); - if (needBrackets) sb.append(']'); - if (port != -1) { - sb.append(':'); - sb.append(port); - } - } else if (authority != null) { - sb.append("//"); - if (authority.startsWith("[")) { - int end = authority.indexOf(']'); - if (end != -1 && authority.indexOf(':') != -1) { - String doquote, dontquote; - if (end == authority.length()) { - dontquote = authority; - doquote = ""; - } else { - dontquote = authority.substring(0, end + 1); - doquote = authority.substring(end + 1); - } - sb.append(dontquote); - sb.append(quote(doquote, L_REG_NAME | L_SERVER, H_REG_NAME | H_SERVER)); - } - } else { - sb.append(quote(authority, L_REG_NAME | L_SERVER, H_REG_NAME | H_SERVER)); - } - } - } - - private static void appendFragment(StringBuilder sb, String fragment) { - if (fragment != null) { - sb.append('#'); - sb.append(quote(fragment, L_URIC, H_URIC)); - } - } - - // Quote any characters in s that are not permitted - // by the given mask pair - // - private static String quote(String s, long lowMask, long highMask) { - int n = s.length(); - StringBuilder sb = null; - CharsetEncoder encoder = null; - boolean allowNonASCII = ((lowMask & L_ESCAPED) != 0); - for (int i = 0; i < s.length(); i++) { - char c = s.charAt(i); - if (c < '\u0080') { - if (!match(c, lowMask, highMask) && !isEscaped(s, i)) { - if (sb == null) { - sb = new StringBuilder(); - sb.append(s, 0, i); - } - 
appendEscape(sb, (byte) c); - } else { - if (sb != null) sb.append(c); - } - } else if (allowNonASCII && (Character.isSpaceChar(c) || Character.isISOControl(c))) { - if (encoder == null) { - encoder = StandardCharsets.UTF_8.newEncoder(); - } - if (sb == null) { - sb = new StringBuilder(); - sb.append(s, 0, i); - } - appendEncoded(encoder, sb, c); - } else { - if (sb != null) sb.append(c); - } - } - return (sb == null) ? s : sb.toString(); - } - - // - // To check if the given string has an escaped triplet - // at the given position - // - private static boolean isEscaped(String s, int pos) { - if (s == null || (s.length() <= (pos + 2))) return false; - - return s.charAt(pos) == '%' && match(s.charAt(pos + 1), L_HEX, H_HEX) && match(s.charAt(pos + 2), L_HEX, H_HEX); - } - - private static void appendEncoded(CharsetEncoder encoder, StringBuilder sb, char c) { - ByteBuffer bb = null; - try { - bb = encoder.encode(CharBuffer.wrap("" + c)); - } catch (CharacterCodingException x) { - assert false; - } - while (bb.hasRemaining()) { - int b = bb.get() & 0xff; - if (b >= 0x80) appendEscape(sb, (byte) b); - else sb.append((char) b); - } - } - - private static void appendEscape(StringBuilder sb, byte b) { - sb.append('%'); - HEX_UPPERCASE.toHexDigits(sb, b); - } - - // Tell whether the given character is permitted by the given mask pair - private static boolean match(char c, long lowMask, long highMask) { - if (c < 64) return ((1L << c) & lowMask) != 0; - if (c < 128) return ((1L << (c - 64)) & highMask) != 0; - return false; - } - - // If a scheme is given then the path, if given, must be absolute - // - private static void checkPath(String s, String scheme, String path) throws URISyntaxException { - if (scheme != null) { - if (path != null && !path.isEmpty() && path.charAt(0) != '/') throw new URISyntaxException(s, "Relative path in absolute URI"); - } - } - - // -- Character classes for parsing -- - - // To save startup time, we manually calculate the low-/highMask constants. 
- // For reference, the following methods were used to calculate the values: - - // Compute a low-order mask for the characters - // between first and last, inclusive - // private static long lowMask(char first, char last) { - // long m = 0; - // int f = Math.max(Math.min(first, 63), 0); - // int l = Math.max(Math.min(last, 63), 0); - // for (int i = f; i <= l; i++) - // m |= 1L << i; - // return m; - // } - - // Compute the low-order mask for the characters in the given string - // private static long lowMask(String chars) { - // int n = chars.length(); - // long m = 0; - // for (int i = 0; i < n; i++) { - // char c = chars.charAt(i); - // if (c < 64) - // m |= (1L << c); - // } - // return m; - // } - - // Compute a high-order mask for the characters - // between first and last, inclusive - // private static long highMask(char first, char last) { - // long m = 0; - // int f = Math.max(Math.min(first, 127), 64) - 64; - // int l = Math.max(Math.min(last, 127), 64) - 64; - // for (int i = f; i <= l; i++) - // m |= 1L << i; - // return m; - // } - - // Compute the high-order mask for the characters in the given string - // private static long highMask(String chars) { - // int n = chars.length(); - // long m = 0; - // for (int i = 0; i < n; i++) { - // char c = chars.charAt(i); - // if ((c >= 64) && (c < 128)) - // m |= (1L << (c - 64)); - // } - // return m; - // } - - // Character-class masks - - // digit = "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | - // "8" | "9" - private static final long L_DIGIT = 0x3FF000000000000L; // lowMask('0', '9'); - private static final long H_DIGIT = 0L; - - // hex = digit | "A" | "B" | "C" | "D" | "E" | "F" | - // "a" | "b" | "c" | "d" | "e" | "f" - private static final long L_HEX = L_DIGIT; - private static final long H_HEX = 0x7E0000007EL; // highMask('A', 'F') | highMask('a', 'f'); - - // upalpha = "A" | "B" | "C" | "D" | "E" | "F" | "G" | "H" | "I" | - // "J" | "K" | "L" | "M" | "N" | "O" | "P" | "Q" | "R" | - // "S" | "T" | "U" | "V" | "W" | "X" | "Y" | "Z" - private static final long L_UPALPHA = 0L; - private static final long H_UPALPHA = 0x7FFFFFEL; // highMask('A', 'Z'); - - // lowalpha = "a" | "b" | "c" | "d" | "e" | "f" | "g" | "h" | "i" | - // "j" | "k" | "l" | "m" | "n" | "o" | "p" | "q" | "r" | - // "s" | "t" | "u" | "v" | "w" | "x" | "y" | "z" - private static final long L_LOWALPHA = 0L; - private static final long H_LOWALPHA = 0x7FFFFFE00000000L; // highMask('a', 'z'); - - // alpha = lowalpha | upalpha - private static final long L_ALPHA = L_LOWALPHA | L_UPALPHA; - private static final long H_ALPHA = H_LOWALPHA | H_UPALPHA; - - // alphanum = alpha | digit - private static final long L_ALPHANUM = L_DIGIT | L_ALPHA; - private static final long H_ALPHANUM = H_DIGIT | H_ALPHA; - - // mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | - // "(" | ")" - private static final long L_MARK = 0x678200000000L; // lowMask("-_.!~*'()"); - private static final long H_MARK = 0x4000000080000000L; // highMask("-_.!~*'()"); - - // unreserved = alphanum | mark - private static final long L_UNRESERVED = L_ALPHANUM | L_MARK; - private static final long H_UNRESERVED = H_ALPHANUM | H_MARK; - - // reserved = ";" | "/" | "?" 
| ":" | "@" | "&" | "=" | "+" | - // "$" | "," | "[" | "]" - // Added per RFC2732: "[", "]" - private static final long L_RESERVED = 0xAC00985000000000L; // lowMask(";/?:@&=+$,[]"); - private static final long H_RESERVED = 0x28000001L; // highMask(";/?:@&=+$,[]"); - - // The zero'th bit is used to indicate that escape pairs and non-US-ASCII - // characters are allowed; this is handled by the scanEscape method below. - private static final long L_ESCAPED = 1L; - private static final long H_ESCAPED = 0L; - - // uric = reserved | unreserved | escaped - private static final long L_URIC = L_RESERVED | L_UNRESERVED | L_ESCAPED; - private static final long H_URIC = H_RESERVED | H_UNRESERVED | H_ESCAPED; - - // pchar = unreserved | escaped | - // ":" | "@" | "&" | "=" | "+" | "$" | "," - private static final long L_PCHAR = L_UNRESERVED | L_ESCAPED | 0x2400185000000000L; // lowMask(":@&=+$,"); - private static final long H_PCHAR = H_UNRESERVED | H_ESCAPED | 0x1L; // highMask(":@&=+$,"); - - // All valid path characters - private static final long L_PATH = L_PCHAR | 0x800800000000000L; // lowMask(";/"); - private static final long H_PATH = H_PCHAR; // highMask(";/") == 0x0L; - - // Dash, for use in domainlabel and toplabel - private static final long L_DASH = 0x200000000000L; // lowMask("-"); - private static final long H_DASH = 0x0L; // highMask("-"); - - // userinfo = *( unreserved | escaped | - // ";" | ":" | "&" | "=" | "+" | "$" | "," ) - private static final long L_USERINFO = L_UNRESERVED | L_ESCAPED | 0x2C00185000000000L; // lowMask(";:&=+$,"); - private static final long H_USERINFO = H_UNRESERVED | H_ESCAPED; // | highMask(";:&=+$,") == 0L; - - // reg_name = 1*( unreserved | escaped | "$" | "," | - // ";" | ":" | "@" | "&" | "=" | "+" ) - private static final long L_REG_NAME = L_UNRESERVED | L_ESCAPED | 0x2C00185000000000L; // lowMask("$,;:@&=+"); - private static final long H_REG_NAME = H_UNRESERVED | H_ESCAPED | 0x1L; // highMask("$,;:@&=+"); - - // All valid characters for server-based authorities - private static final long L_SERVER = L_USERINFO | L_ALPHANUM | L_DASH | 0x400400000000000L; // lowMask(".:@[]"); - private static final long H_SERVER = H_USERINFO | H_ALPHANUM | H_DASH | 0x28000001L; // highMask(".:@[]"); - - // Characters that are encoded in the path component of a URI. - // - // These characters are reserved in the path segment as described in - // RFC2396 section 3.3: - // "=" | ";" | "?" | "/" - // - // These characters are defined as excluded in RFC2396 section 2.4.3 - // and must be escaped if they occur in the data part of a URI: - // "#" | " " | "<" | ">" | "%" | "\"" | "{" | "}" | "|" | "\\" | "^" | - // "[" | "]" | "`" - // - // Also US ASCII control characters 00-1F and 7F. - - // lowMask((char)0, (char)31) | lowMask("=;?/# <>%\"{}|\\^[]`"); - private static final long L_ENCODED = 0xF800802DFFFFFFFFL; - - // highMask((char)0x7F, (char)0x7F) | highMask("=;?/# <>%\"{}|\\^[]`"); - private static final long H_ENCODED = 0xB800000178000000L; - -} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Password.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Password.java deleted file mode 100644 index ffe5f734fa0ea..0000000000000 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Password.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.secure_sm.policy; - -import java.io.ByteArrayInputStream; -import java.io.Console; -import java.io.IOException; -import java.io.InputStream; -import java.io.PushbackInputStream; -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.CharsetEncoder; -import java.nio.charset.CodingErrorAction; -import java.util.Arrays; - -/** - * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/util/Password.java - */ -public class Password { - /** Reads user password from given input stream. */ - public static char[] readPassword(InputStream in) throws IOException { - return readPassword(in, false); - } - - /** Reads user password from given input stream. - * @param isEchoOn true if the password should be echoed on the screen - */ - @SuppressWarnings("fallthrough") - public static char[] readPassword(InputStream in, boolean isEchoOn) throws IOException { - - char[] consoleEntered = null; - byte[] consoleBytes = null; - - try { - // Use the new java.io.Console class - Console con = null; - if (!isEchoOn && in == System.in && ((con = System.console()) != null)) { - consoleEntered = con.readPassword(); - // readPassword returns "" if you just print ENTER, - // to be compatible with old Password class, change to null - if (consoleEntered != null && consoleEntered.length == 0) { - return null; - } - consoleBytes = convertToBytes(consoleEntered); - in = new ByteArrayInputStream(consoleBytes); - } - - // Rest of the lines still necessary for KeyStoreLoginModule - // and when there is no console. 
- - char[] lineBuffer; - char[] buf; - int i; - - buf = lineBuffer = new char[128]; - - int room = buf.length; - int offset = 0; - int c; - - boolean done = false; - while (!done) { - switch (c = in.read()) { - case -1: - case '\n': - done = true; - break; - - case '\r': - int c2 = in.read(); - if ((c2 != '\n') && (c2 != -1)) { - if (!(in instanceof PushbackInputStream)) { - in = new PushbackInputStream(in); - } - ((PushbackInputStream) in).unread(c2); - } else { - done = true; - break; - } - /* fall through */ - default: - if (--room < 0) { - buf = new char[offset + 128]; - room = buf.length - offset - 1; - System.arraycopy(lineBuffer, 0, buf, 0, offset); - Arrays.fill(lineBuffer, ' '); - lineBuffer = buf; - } - buf[offset++] = (char) c; - break; - } - } - - if (offset == 0) { - return null; - } - - char[] ret = new char[offset]; - System.arraycopy(buf, 0, ret, 0, offset); - Arrays.fill(buf, ' '); - - return ret; - } finally { - if (consoleEntered != null) { - Arrays.fill(consoleEntered, ' '); - } - if (consoleBytes != null) { - Arrays.fill(consoleBytes, (byte) 0); - } - } - } - - /** - * Change a password read from Console.readPassword() into - * its original bytes. - * - * @param pass a char[] - * @return its byte[] format, similar to new String(pass).getBytes() - */ - private static byte[] convertToBytes(char[] pass) { - if (enc == null) { - synchronized (Password.class) { - enc = System.console() - .charset() - .newEncoder() - .onMalformedInput(CodingErrorAction.REPLACE) - .onUnmappableCharacter(CodingErrorAction.REPLACE); - } - } - byte[] ba = new byte[(int) (enc.maxBytesPerChar() * pass.length)]; - ByteBuffer bb = ByteBuffer.wrap(ba); - synchronized (enc) { - enc.reset().encode(CharBuffer.wrap(pass), bb, true); - } - if (bb.position() < ba.length) { - ba[bb.position()] = '\n'; - } - return ba; - } - - private static volatile CharsetEncoder enc; -} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java deleted file mode 100644 index 14b1a8f56375c..0000000000000 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java +++ /dev/null @@ -1,1601 +0,0 @@ -/* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.secure_sm.policy; - -import javax.security.auth.Subject; -import javax.security.auth.x500.X500Principal; - -import java.io.File; -import java.io.FilePermission; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.ObjectInputStream; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.net.MalformedURLException; -import java.net.NetPermission; -import java.net.SocketPermission; -import java.net.URI; -import java.net.URL; -import java.security.AllPermission; -import java.security.CodeSource; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.Permission; -import java.security.PermissionCollection; -import java.security.Permissions; -import java.security.Principal; -import java.security.ProtectionDomain; -import java.security.Security; -import java.security.SecurityPermission; -import java.security.UnresolvedPermission; -import java.security.cert.Certificate; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.PropertyPermission; -import java.util.Set; -import java.util.StringTokenizer; -import java.util.concurrent.ConcurrentHashMap; - -import static java.nio.charset.StandardCharsets.UTF_8; - -/** - * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/provider/PolicyFile.java - */ -@SuppressWarnings("removal") -public class PolicyFile extends java.security.Policy { - private static final String SELF = "${{self}}"; - private static final String X500PRINCIPAL = "javax.security.auth.x500.X500Principal"; - private static final String POLICY = "java.security.policy"; - private static final String POLICY_URL = "policy.url."; - - private static final int DEFAULT_CACHE_SIZE = 1; - - // contains the policy grant entries, PD cache, and alias mapping - // can be updated if refresh() is called - private volatile PolicyInfo policyInfo; - - private boolean expandProperties = true; - private boolean allowSystemProperties = true; - private boolean notUtf8 = false; - private URL url; - - // for use with the reflection API - private static final Class[] PARAMS0 = {}; - private static final Class[] PARAMS1 = { String.class }; - private static final Class[] PARAMS2 = { String.class, String.class }; - - /** - * When a policy file has a syntax error, the exception code may generate - * another permission check and this can cause the policy file to be parsed - * repeatedly, leading to a StackOverflowError or ClassCircularityError. - * To avoid this, this set is populated with policy files that have been - * previously parsed and have syntax errors, so that they can be - * subsequently ignored. - */ - private static Set badPolicyURLs = Collections.newSetFromMap(new ConcurrentHashMap()); - - /** - * Initializes the Policy object and reads the default policy - * configuration file(s) into the Policy object. 
- */ - public PolicyFile() { - init((URL) null); - } - - /** - * Initializes the Policy object and reads the default policy - * from the specified URL only. - */ - public PolicyFile(URL url) { - this.url = url; - init(url); - } - - /** - * Initializes the Policy object and reads the default policy - * configuration file(s) into the Policy object. - * - * See the class description for details on the algorithm used to - * initialize the Policy object. - */ - private void init(URL url) { - int numCaches = DEFAULT_CACHE_SIZE; - PolicyInfo newInfo = new PolicyInfo(numCaches); - initPolicyFile(newInfo, url); - policyInfo = newInfo; - } - - private void initPolicyFile(final PolicyInfo newInfo, final URL url) { - if (url != null) { - - /** - * If the caller specified a URL via Policy.getInstance, - * we only read from default.policy and that URL. - */ - - if (init(url, newInfo) == false) { - // use static policy if all else fails - initStaticPolicy(newInfo); - } - - } else { - - /** - * Caller did not specify URL via Policy.getInstance. - * Read from URLs listed in the java.security properties file. - */ - - boolean loaded_one = initPolicyFile(POLICY, POLICY_URL, newInfo); - // To maintain strict backward compatibility - // we load the static policy only if POLICY load failed - if (!loaded_one) { - // use static policy if all else fails - initStaticPolicy(newInfo); - } - } - } - - private boolean initPolicyFile(final String propname, final String urlname, final PolicyInfo newInfo) { - boolean loaded_policy = false; - - if (allowSystemProperties) { - String extra_policy = System.getProperty(propname); - if (extra_policy != null) { - boolean overrideAll = false; - if (extra_policy.startsWith("=")) { - overrideAll = true; - extra_policy = extra_policy.substring(1); - } - try { - extra_policy = PropertyExpander.expand(extra_policy); - URL policyURL; - - File policyFile = new File(extra_policy); - if (policyFile.exists()) { - policyURL = ParseUtil.fileToEncodedURL(new File(policyFile.getCanonicalPath())); - } else { - policyURL = newURL(extra_policy); - } - if (init(policyURL, newInfo)) { - loaded_policy = true; - } - } catch (Exception e) {} - if (overrideAll) { - return Boolean.valueOf(loaded_policy); - } - } - } - - int n = 1; - String policy_uri; - - while ((policy_uri = Security.getProperty(urlname + n)) != null) { - try { - URL policy_url = null; - String expanded_uri = PropertyExpander.expand(policy_uri).replace(File.separatorChar, '/'); - - if (policy_uri.startsWith("file:${java.home}/") || policy_uri.startsWith("file:${user.home}/")) { - - // this special case accommodates - // the situation java.home/user.home - // expand to a single slash, resulting in - // a file://foo URI - policy_url = new File(expanded_uri.substring(5)).toURI().toURL(); - } else { - policy_url = new URI(expanded_uri).toURL(); - } - - if (init(policy_url, newInfo)) { - loaded_policy = true; - } - } catch (Exception e) { - // ignore that policy - } - n++; - } - return Boolean.valueOf(loaded_policy); - } - - /** - * Reads a policy configuration into the Policy object using a - * Reader object. 
- */ - private boolean init(URL policy, PolicyInfo newInfo) { - - // skip parsing policy file if it has been previously parsed and - // has syntax errors - if (badPolicyURLs.contains(policy)) { - return false; - } - - try (InputStreamReader isr = getInputStreamReader(PolicyUtil.getInputStream(policy))) { - - PolicyParser pp = new PolicyParser(expandProperties); - pp.read(isr); - - KeyStore keyStore = null; - try { - keyStore = PolicyUtil.getKeyStore( - policy, - pp.getKeyStoreUrl(), - pp.getKeyStoreType(), - pp.getKeyStoreProvider(), - pp.getStorePassURL() - ); - } catch (Exception e) { - // ignore, treat it like we have no keystore - } - - Enumeration enum_ = pp.grantElements(); - while (enum_.hasMoreElements()) { - PolicyParser.GrantEntry ge = enum_.nextElement(); - addGrantEntry(ge, keyStore, newInfo); - } - return true; - } catch (PolicyParser.ParsingException pe) { - // record bad policy file to avoid later reparsing it - badPolicyURLs.add(policy); - pe.printStackTrace(System.err); - } catch (Exception e) {} - - return false; - } - - private InputStreamReader getInputStreamReader(InputStream is) { - /* - * Read in policy using UTF-8 by default. - * - * Check non-standard system property to see if the default encoding - * should be used instead. - */ - return (notUtf8) ? new InputStreamReader(is) : new InputStreamReader(is, UTF_8); - } - - private void initStaticPolicy(final PolicyInfo newInfo) { - PolicyEntry pe = new PolicyEntry(new CodeSource(null, (Certificate[]) null)); - pe.add(SecurityConstants.LOCAL_LISTEN_PERMISSION); - pe.add(new PropertyPermission("java.version", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.vendor", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.vendor.url", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.class.version", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("os.name", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("os.version", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("os.arch", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("file.separator", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("path.separator", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("line.separator", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.specification.version", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.specification.maintenance.version", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.specification.vendor", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.specification.name", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.vm.specification.version", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.vm.specification.vendor", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.vm.specification.name", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.vm.version", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.vm.vendor", SecurityConstants.PROPERTY_READ_ACTION)); - pe.add(new PropertyPermission("java.vm.name", SecurityConstants.PROPERTY_READ_ACTION)); - - // No need to sync because no one has access to newInfo yet - 
newInfo.policyEntries.add(pe); - } - - /** - * Given a GrantEntry, create a codeSource. - * - * @return null if signedBy alias is not recognized - */ - private CodeSource getCodeSource(PolicyParser.GrantEntry ge, KeyStore keyStore, PolicyInfo newInfo) - throws java.net.MalformedURLException { - Certificate[] certs = null; - if (ge.signedBy != null) { - certs = getCertificates(keyStore, ge.signedBy, newInfo); - if (certs == null) { - return null; - } - } - - URL location; - - if (ge.codeBase != null) location = newURL(ge.codeBase); - else location = null; - - return (canonicalizeCodebase(new CodeSource(location, certs), false)); - } - - /** - * Add one policy entry to the list. - */ - private void addGrantEntry(PolicyParser.GrantEntry ge, KeyStore keyStore, PolicyInfo newInfo) { - - try { - CodeSource codesource = getCodeSource(ge, keyStore, newInfo); - // skip if signedBy alias was unknown... - if (codesource == null) return; - - // perform keystore alias principal replacement. - // for example, if alias resolves to X509 certificate, - // replace principal with: - // -- skip if alias is unknown - if (replacePrincipals(ge.principals, keyStore) == false) return; - PolicyEntry entry = new PolicyEntry(codesource, ge.principals); - Enumeration enum_ = ge.permissionElements(); - while (enum_.hasMoreElements()) { - PolicyParser.PermissionEntry pe = enum_.nextElement(); - - try { - // perform ${{ ... }} expansions within permission name - expandPermissionName(pe, keyStore); - - // XXX special case PrivateCredentialPermission-SELF - Permission perm; - if (pe.permission.equals("javax.security.auth.PrivateCredentialPermission") && pe.name.endsWith(" self")) { - pe.name = pe.name.substring(0, pe.name.indexOf("self")) + SELF; - } - // check for self - if (pe.name != null && pe.name.contains(SELF)) { - // Create a "SelfPermission" , it could be an - // an unresolved permission which will be resolved - // when implies is called - // Add it to entry - Certificate[] certs; - if (pe.signedBy != null) { - certs = getCertificates(keyStore, pe.signedBy, newInfo); - } else { - certs = null; - } - perm = new SelfPermission(pe.permission, pe.name, pe.action, certs); - } else { - perm = getInstance(pe.permission, pe.name, pe.action); - } - entry.add(perm); - } catch (ClassNotFoundException cnfe) { - Certificate[] certs; - if (pe.signedBy != null) { - certs = getCertificates(keyStore, pe.signedBy, newInfo); - } else { - certs = null; - } - - // only add if we had no signer or we had - // a signer and found the keys for it. - if (certs != null || pe.signedBy == null) { - Permission perm = new UnresolvedPermission(pe.permission, pe.name, pe.action, certs); - entry.add(perm); - } - } catch (java.lang.reflect.InvocationTargetException ite) { - ite.printStackTrace(System.err); - } catch (Exception e) { - e.printStackTrace(System.err); - } - } - - // No need to sync because no one has access to newInfo yet - newInfo.policyEntries.add(entry); - } catch (Exception e) { - e.printStackTrace(System.err); - } - } - - /** - * Returns a new Permission object of the given Type. The Permission is - * created by getting the - * Class object using the Class.forName method, and using - * the reflection API to invoke the (String name, String actions) - * constructor on the - * object. - * - * @param type the type of Permission being created. - * @param name the name of the Permission being created. - * @param actions the actions of the Permission being created. 
- * - * @exception ClassNotFoundException if the particular Permission - * class could not be found. - * - * @exception IllegalAccessException if the class or initializer is - * not accessible. - * - * @exception InstantiationException if getInstance tries to - * instantiate an abstract class or an interface, or if the - * instantiation fails for some other reason. - * - * @exception NoSuchMethodException if the (String, String) constructor - * is not found. - * - * @exception InvocationTargetException if the underlying Permission - * constructor throws an exception. - * - */ - - private static final Permission getInstance(String type, String name, String actions) throws ClassNotFoundException, - InstantiationException, IllegalAccessException, NoSuchMethodException, InvocationTargetException { - Class pc = Class.forName(type, false, null); - Permission answer = getKnownPermission(pc, name, actions); - if (answer != null) { - return answer; - } - if (!Permission.class.isAssignableFrom(pc)) { - // not the right subtype - throw new ClassCastException(type + " is not a Permission"); - } - - if (name == null && actions == null) { - try { - Constructor c = pc.getConstructor(PARAMS0); - return (Permission) c.newInstance(new Object[] {}); - } catch (NoSuchMethodException ne) { - try { - Constructor c = pc.getConstructor(PARAMS1); - return (Permission) c.newInstance(new Object[] { name }); - } catch (NoSuchMethodException ne1) { - Constructor c = pc.getConstructor(PARAMS2); - return (Permission) c.newInstance(new Object[] { name, actions }); - } - } - } else { - if (name != null && actions == null) { - try { - Constructor c = pc.getConstructor(PARAMS1); - return (Permission) c.newInstance(new Object[] { name }); - } catch (NoSuchMethodException ne) { - Constructor c = pc.getConstructor(PARAMS2); - return (Permission) c.newInstance(new Object[] { name, actions }); - } - } else { - Constructor c = pc.getConstructor(PARAMS2); - return (Permission) c.newInstance(new Object[] { name, actions }); - } - } - } - - /** - * Creates one of the well-known permissions in the java.base module - * directly instead of via reflection. Keep list short to not penalize - * permissions from other modules. - */ - private static Permission getKnownPermission(Class claz, String name, String actions) { - if (claz.equals(FilePermission.class)) { - return new FilePermission(name, actions); - } else if (claz.equals(SocketPermission.class)) { - return new SocketPermission(name, actions); - } else if (claz.equals(RuntimePermission.class)) { - return new RuntimePermission(name, actions); - } else if (claz.equals(PropertyPermission.class)) { - return new PropertyPermission(name, actions); - } else if (claz.equals(NetPermission.class)) { - return new NetPermission(name, actions); - } else if (claz.equals(AllPermission.class)) { - return SecurityConstants.ALL_PERMISSION; - } else if (claz.equals(SecurityPermission.class)) { - return new SecurityPermission(name, actions); - } else { - return null; - } - } - - /** - * Creates one of the well-known principals in the java.base module - * directly instead of via reflection. Keep list short to not penalize - * principals from other modules. - */ - private static Principal getKnownPrincipal(Class claz, String name) { - if (claz.equals(X500Principal.class)) { - return new X500Principal(name); - } else { - return null; - } - } - - /** - * Fetch all certs associated with this alias. 
- */ - private Certificate[] getCertificates(KeyStore keyStore, String aliases, PolicyInfo newInfo) { - - List vcerts = null; - - StringTokenizer st = new StringTokenizer(aliases, ","); - int n = 0; - - while (st.hasMoreTokens()) { - String alias = st.nextToken().trim(); - n++; - Certificate cert = null; - // See if this alias's cert has already been cached - synchronized (newInfo.aliasMapping) { - cert = (Certificate) newInfo.aliasMapping.get(alias); - - if (cert == null && keyStore != null) { - - try { - cert = keyStore.getCertificate(alias); - } catch (KeyStoreException kse) { - // never happens, because keystore has already been loaded - // when we call this - } - if (cert != null) { - newInfo.aliasMapping.put(alias, cert); - newInfo.aliasMapping.put(cert, alias); - } - } - } - - if (cert != null) { - if (vcerts == null) vcerts = new ArrayList<>(); - vcerts.add(cert); - } - } - - // make sure n == vcerts.size, since we are doing a logical *and* - if (vcerts != null && n == vcerts.size()) { - Certificate[] certs = new Certificate[vcerts.size()]; - vcerts.toArray(certs); - return certs; - } else { - return null; - } - } - - /** - * Refreshes the policy object by re-reading all the policy files. - */ - @Override - public void refresh() { - init(url); - } - - /** - * Evaluates the global policy for the permissions granted to - * the ProtectionDomain and tests whether the permission is - * granted. - * - * @param pd the ProtectionDomain to test - * @param p the Permission object to be tested for implication. - * - * @return true if "permission" is a proper subset of a permission - * granted to this ProtectionDomain. - * - * @see java.security.ProtectionDomain - */ - @Override - public boolean implies(ProtectionDomain pd, Permission p) { - PermissionCollection pc = getPermissions(pd); - if (pc == null) { - return false; - } - - // cache mapping of protection domain to its PermissionCollection - return pc.implies(p); - } - - /** - * Examines this Policy and returns the permissions granted - * to the specified ProtectionDomain. This includes - * the permissions currently associated with the domain as well - * as the policy permissions granted to the domain's - * CodeSource, ClassLoader, and Principals. - * - *
Note that this Policy implementation has - * special handling for PrivateCredentialPermissions. - * When this method encounters a PrivateCredentialPermission - * which specifies "self" as the Principal class and name, - * it does not add that Permission to the returned - * PermissionCollection. Instead, it builds - * a new PrivateCredentialPermission - * for each Principal associated with the provided - * Subject. Each new PrivateCredentialPermission - * contains the same Credential class as specified in the - * originally granted permission, as well as the Class and name - * for the respective Principal. - * - * @param domain the Permissions granted to this - * ProtectionDomain are returned. - * - * @return the Permissions granted to the provided - * ProtectionDomain. - */ - @Override - public PermissionCollection getPermissions(ProtectionDomain domain) { - Permissions perms = new Permissions(); - - if (domain == null) return perms; - - // first get policy perms - getPermissions(perms, domain); - - // add static perms - // - adding static perms after policy perms is necessary - // to avoid a regression for 4301064 - PermissionCollection pc = domain.getPermissions(); - if (pc != null) { - synchronized (pc) { - Enumeration e = pc.elements(); - while (e.hasMoreElements()) { - perms.add(e.nextElement()); - } - } - } - - return perms; - } - - /** - * Examines this Policy and creates a PermissionCollection object with - * the set of permissions for the specified CodeSource. - * - * @param codesource the CodeSource associated with the caller. - * This encapsulates the original location of the code (where the code - * came from) and the public key(s) of its signer. - * - * @return the set of permissions according to the policy. - */ - @Override - public PermissionCollection getPermissions(CodeSource codesource) { - return getPermissions(new Permissions(), codesource); - } - - /** - * Examines the global policy and returns the provided Permissions - * object with additional permissions granted to the specified - * ProtectionDomain. - * - * @param perms the Permissions to populate - * @param pd the ProtectionDomain associated with the caller. - * - * @return the set of Permissions according to the policy. - */ - private PermissionCollection getPermissions(Permissions perms, ProtectionDomain pd) { - final CodeSource cs = pd.getCodeSource(); - if (cs == null) return perms; - - CodeSource canonCodeSource = canonicalizeCodebase(cs, true); - return getPermissions(perms, canonCodeSource, pd.getPrincipals()); - } - - /** - * Examines the global policy and returns the provided Permissions - * object with additional permissions granted to the specified - * CodeSource. - * - * @param perms the permissions to populate - * @param cs the codesource associated with the caller. - * This encapsulates the original location of the code (where the code - * came from) and the public key(s) of its signer. - * - * @return the set of permissions according to the policy. 
- */ - private PermissionCollection getPermissions(Permissions perms, final CodeSource cs) { - - if (cs == null) return perms; - - CodeSource canonCodeSource = canonicalizeCodebase(cs, true); - return getPermissions(perms, canonCodeSource, null); - } - - private Permissions getPermissions(Permissions perms, final CodeSource cs, Principal[] principals) { - for (PolicyEntry entry : policyInfo.policyEntries) { - addPermissions(perms, cs, principals, entry); - } - - return perms; - } - - private void addPermissions(Permissions perms, final CodeSource cs, Principal[] principals, final PolicyEntry entry) { - - // check to see if the CodeSource implies - Boolean imp = entry.getCodeSource().implies(cs); - if (!imp.booleanValue()) { - // CodeSource does not imply - return and try next policy entry - return; - } - - // check to see if the Principals imply - - List entryPs = entry.getPrincipals(); - - if (entryPs == null || entryPs.isEmpty()) { - - // policy entry has no principals - - // add perms regardless of principals in current ACC - - addPerms(perms, principals, entry); - return; - - } else if (principals == null || principals.length == 0) { - - // current thread has no principals but this policy entry - // has principals - perms are not added - - return; - } - - // current thread has principals and this policy entry - // has principals. see if policy entry principals match - // principals in current ACC - - for (PolicyParser.PrincipalEntry pppe : entryPs) { - - // Check for wildcards - if (pppe.isWildcardClass()) { - // a wildcard class matches all principals in current ACC - continue; - } - - if (pppe.isWildcardName()) { - // a wildcard name matches any principal with the same class - if (wildcardPrincipalNameImplies(pppe.principalClass, principals)) { - continue; - } - // policy entry principal not in current ACC - - // immediately return and go to next policy entry - return; - } - - Set pSet = new HashSet<>(Arrays.asList(principals)); - Subject subject = new Subject(true, pSet, Collections.EMPTY_SET, Collections.EMPTY_SET); - try { - ClassLoader cl = Thread.currentThread().getContextClassLoader(); - Class pClass = Class.forName(pppe.principalClass, false, cl); - Principal p = getKnownPrincipal(pClass, pppe.principalName); - if (p == null) { - if (!Principal.class.isAssignableFrom(pClass)) { - // not the right subtype - throw new ClassCastException(pppe.principalClass + " is not a Principal"); - } - - Constructor c = pClass.getConstructor(PARAMS1); - p = (Principal) c.newInstance(new Object[] { pppe.principalName }); - - } - - // check if the Principal implies the current - // thread's principals - if (!p.implies(subject)) { - // policy principal does not imply the current Subject - - // immediately return and go to next policy entry - return; - } - } catch (Exception e) { - // fall back to default principal comparison. - // see if policy entry principal is in current ACC - - if (!pppe.implies(subject)) { - // policy entry principal not in current ACC - - // immediately return and go to next policy entry - return; - } - } - - // either the principal information matched, - // or the Principal.implies succeeded. - // continue loop and test the next policy principal - } - - // all policy entry principals were found in the current ACC - - // grant the policy permissions - - addPerms(perms, principals, entry); - } - - /** - * Returns true if the array of principals contains at least one - * principal of the specified class. 
- */ - private static boolean wildcardPrincipalNameImplies(String principalClass, Principal[] principals) { - for (Principal p : principals) { - if (principalClass.equals(p.getClass().getName())) { - return true; - } - } - return false; - } - - private void addPerms(Permissions perms, Principal[] accPs, PolicyEntry entry) { - for (int i = 0; i < entry.permissions.size(); i++) { - Permission p = entry.permissions.get(i); - - if (p instanceof SelfPermission) { - // handle "SELF" permissions - expandSelf((SelfPermission) p, entry.getPrincipals(), accPs, perms); - } else { - perms.add(p); - } - } - } - - /** - * @param sp the SelfPermission that needs to be expanded. - * - * @param entryPs list of principals for the Policy entry. - * - * @param pdp Principal array from the current ProtectionDomain. - * - * @param perms the PermissionCollection where the individual - * Permissions will be added after expansion. - */ - - private void expandSelf(SelfPermission sp, List entryPs, Principal[] pdp, Permissions perms) { - - if (entryPs == null || entryPs.isEmpty()) { - return; - } - int startIndex = 0; - int v; - StringBuilder sb = new StringBuilder(); - while ((v = sp.getSelfName().indexOf(SELF, startIndex)) != -1) { - - // add non-SELF string - sb.append(sp.getSelfName().substring(startIndex, v)); - - // expand SELF - Iterator pli = entryPs.iterator(); - while (pli.hasNext()) { - PolicyParser.PrincipalEntry pppe = pli.next(); - String[][] principalInfo = getPrincipalInfo(pppe, pdp); - for (int i = 0; i < principalInfo.length; i++) { - if (i != 0) { - sb.append(", "); - } - sb.append(principalInfo[i][0] + " " + "\"" + principalInfo[i][1] + "\""); - } - if (pli.hasNext()) { - sb.append(", "); - } - } - startIndex = v + SELF.length(); - } - // add remaining string (might be the entire string) - sb.append(sp.getSelfName().substring(startIndex)); - - try { - // first try to instantiate the permission - perms.add(getInstance(sp.getSelfType(), sb.toString(), sp.getSelfActions())); - } catch (ClassNotFoundException cnfe) { - // ok, the permission is not in the bootclasspath. - // before we add an UnresolvedPermission, check to see - // whether this perm already belongs to the collection. - // if so, use that perm's ClassLoader to create a new - // one. - Class pc = null; - synchronized (perms) { - Enumeration e = perms.elements(); - while (e.hasMoreElements()) { - Permission pElement = e.nextElement(); - if (pElement.getClass().getName().equals(sp.getSelfType())) { - pc = pElement.getClass(); - break; - } - } - } - if (pc == null) { - // create an UnresolvedPermission - perms.add(new UnresolvedPermission(sp.getSelfType(), sb.toString(), sp.getSelfActions(), sp.getCerts())); - } else { - try { - // we found an instantiated permission. - // use its class loader to instantiate a new permission. - Constructor c; - // name parameter can not be null - if (sp.getSelfActions() == null) { - try { - c = pc.getConstructor(PARAMS1); - perms.add((Permission) c.newInstance(new Object[] { sb.toString() })); - } catch (NoSuchMethodException ne) { - c = pc.getConstructor(PARAMS2); - perms.add((Permission) c.newInstance(new Object[] { sb.toString(), sp.getSelfActions() })); - } - } else { - c = pc.getConstructor(PARAMS2); - perms.add((Permission) c.newInstance(new Object[] { sb.toString(), sp.getSelfActions() })); - } - } catch (Exception nme) {} - } - } catch (Exception e) {} - } - - /** - * return the principal class/name pair in the 2D array. - * array[x][y]: x corresponds to the array length. 
- * if (y == 0), it's the principal class. - * if (y == 1), it's the principal name. - */ - private String[][] getPrincipalInfo(PolicyParser.PrincipalEntry pe, Principal[] pdp) { - - // there are 3 possibilities: - // 1) the entry's Principal class and name are not wildcarded - // 2) the entry's Principal name is wildcarded only - // 3) the entry's Principal class and name are wildcarded - - if (!pe.isWildcardClass() && !pe.isWildcardName()) { - - // build an info array for the principal - // from the Policy entry - String[][] info = new String[1][2]; - info[0][0] = pe.principalClass; - info[0][1] = pe.principalName; - return info; - - } else if (!pe.isWildcardClass() && pe.isWildcardName()) { - - // build an info array for every principal - // in the current domain which has a principal class - // that is equal to policy entry principal class name - List plist = new ArrayList<>(); - for (int i = 0; i < pdp.length; i++) { - if (pe.principalClass.equals(pdp[i].getClass().getName())) plist.add(pdp[i]); - } - String[][] info = new String[plist.size()][2]; - int i = 0; - for (Principal p : plist) { - info[i][0] = p.getClass().getName(); - info[i][1] = p.getName(); - i++; - } - return info; - - } else { - - // build an info array for every - // one of the current Domain's principals - - String[][] info = new String[pdp.length][2]; - - for (int i = 0; i < pdp.length; i++) { - info[i][0] = pdp[i].getClass().getName(); - info[i][1] = pdp[i].getName(); - } - return info; - } - } - - /* - * Returns the signer certificates from the list of certificates - * associated with the given code source. - * - * The signer certificates are those certificates that were used - * to verify signed code originating from the codesource location. - * - * This method assumes that in the given code source, each signer - * certificate is followed by its supporting certificate chain - * (which may be empty), and that the signer certificate and its - * supporting certificate chain are ordered bottom-to-top - * (i.e., with the signer certificate first and the (root) certificate - * authority last). - */ - protected Certificate[] getSignerCertificates(CodeSource cs) { - Certificate[] certs = null; - if ((certs = cs.getCertificates()) == null) return null; - for (int i = 0; i < certs.length; i++) { - if (!(certs[i] instanceof X509Certificate)) return cs.getCertificates(); - } - - // Do we have to do anything? 
- int i = 0; - int count = 0; - while (i < certs.length) { - count++; - while (((i + 1) < certs.length) - && ((X509Certificate) certs[i]).getIssuerX500Principal() - .equals(((X509Certificate) certs[i + 1]).getSubjectX500Principal())) { - i++; - } - i++; - } - if (count == certs.length) - // Done - return certs; - - List userCertList = new ArrayList<>(); - i = 0; - while (i < certs.length) { - userCertList.add(certs[i]); - while (((i + 1) < certs.length) - && ((X509Certificate) certs[i]).getIssuerX500Principal() - .equals(((X509Certificate) certs[i + 1]).getSubjectX500Principal())) { - i++; - } - i++; - } - Certificate[] userCerts = new Certificate[userCertList.size()]; - userCertList.toArray(userCerts); - return userCerts; - } - - private CodeSource canonicalizeCodebase(CodeSource cs, boolean extractSignerCerts) { - - String path = null; - - CodeSource canonCs = cs; - URL u = cs.getLocation(); - if (u != null) { - if (u.getProtocol().equals("jar")) { - // unwrap url embedded inside jar url - String spec = u.getFile(); - int separator = spec.indexOf("!/"); - if (separator != -1) { - try { - u = newURL(spec.substring(0, separator)); - } catch (MalformedURLException e) { - // Fail silently. In this case, url stays what - // it was above - } - } - } - if (u.getProtocol().equals("file")) { - boolean isLocalFile = false; - String host = u.getHost(); - isLocalFile = (host == null || host.isEmpty() || host.equals("~") || host.equalsIgnoreCase("localhost")); - - if (isLocalFile) { - path = u.getFile().replace('/', File.separatorChar); - path = ParseUtil.decode(path); - } - } - } - - if (path != null) { - try { - URL csUrl = null; - path = canonPath(path); - csUrl = ParseUtil.fileToEncodedURL(new File(path)); - - if (extractSignerCerts) { - canonCs = new CodeSource(csUrl, getSignerCertificates(cs)); - } else { - canonCs = new CodeSource(csUrl, cs.getCertificates()); - } - } catch (IOException ioe) { - // leave codesource as it is, unless we have to extract its - // signer certificates - if (extractSignerCerts) { - canonCs = new CodeSource(cs.getLocation(), getSignerCertificates(cs)); - } - } - } else { - if (extractSignerCerts) { - canonCs = new CodeSource(cs.getLocation(), getSignerCertificates(cs)); - } - } - return canonCs; - } - - // Wrapper to return a canonical path that avoids calling getCanonicalPath() - // with paths that are intended to match all entries in the directory - private static String canonPath(String path) throws IOException { - if (path.endsWith("*")) { - path = path.substring(0, path.length() - 1) + "-"; - path = new File(path).getCanonicalPath(); - return path.substring(0, path.length() - 1) + "*"; - } else { - return new File(path).getCanonicalPath(); - } - } - - /** - * return true if no replacement was performed, - * or if replacement succeeded. 
- */ - private boolean replacePrincipals(List principals, KeyStore keystore) { - - if (principals == null || principals.isEmpty() || keystore == null) return true; - - for (PolicyParser.PrincipalEntry pppe : principals) { - if (pppe.isReplaceName()) { - - // perform replacement - // (only X509 replacement is possible now) - String name; - if ((name = getDN(pppe.principalName, keystore)) == null) { - return false; - } - - pppe.principalClass = X500PRINCIPAL; - pppe.principalName = name; - } - } - // return true if no replacement was performed, - // or if replacement succeeded - return true; - } - - private void expandPermissionName(PolicyParser.PermissionEntry pe, KeyStore keystore) throws Exception { - // short cut the common case - if (pe.name == null || pe.name.indexOf("${{", 0) == -1) { - return; - } - - int startIndex = 0; - int b, e; - StringBuilder sb = new StringBuilder(); - while ((b = pe.name.indexOf("${{", startIndex)) != -1) { - e = pe.name.indexOf("}}", b); - if (e < 1) { - break; - } - sb.append(pe.name.substring(startIndex, b)); - - // get the value in ${{...}} - String value = pe.name.substring(b + 3, e); - - // parse up to the first ':' - int colonIndex; - String prefix = value; - String suffix; - if ((colonIndex = value.indexOf(':')) != -1) { - prefix = value.substring(0, colonIndex); - } - - // handle different prefix possibilities - if (prefix.equalsIgnoreCase("self")) { - // do nothing - handled later - sb.append(pe.name.substring(b, e + 2)); - startIndex = e + 2; - continue; - } else if (prefix.equalsIgnoreCase("alias")) { - // get the suffix and perform keystore alias replacement - if (colonIndex == -1) { - throw new Exception("Alias name not provided pe.name: " + pe.name); - } - suffix = value.substring(colonIndex + 1); - if ((suffix = getDN(suffix, keystore)) == null) { - throw new Exception("Unable to perform substitution on alias suffix: " + value.substring(colonIndex + 1)); - } - - sb.append(X500PRINCIPAL + " \"" + suffix + "\""); - startIndex = e + 2; - } else { - throw new Exception("Substitution value prefix unsupported: " + prefix); - } - } - - // copy the rest of the value - sb.append(pe.name.substring(startIndex)); - - pe.name = sb.toString(); - } - - private String getDN(String alias, KeyStore keystore) { - Certificate cert = null; - try { - cert = keystore.getCertificate(alias); - } catch (Exception e) { - return null; - } - - if (!(cert instanceof X509Certificate x509Cert)) { - return null; - } else { - // 4702543: X500 names with an EmailAddress - // were encoded incorrectly. create new - // X500Principal name with correct encoding - - X500Principal p = new X500Principal(x509Cert.getSubjectX500Principal().toString()); - return p.getName(); - } - } - - /** - * Each entry in the policy configuration file is represented by a - * PolicyEntry object.
- * - * A PolicyEntry is a (CodeSource,Permission) pair. The - * CodeSource contains the (URL, PublicKey) that together identify - * where the Java bytecodes come from and who (if anyone) signed - * them. The URL could refer to localhost. The URL could also be - * null, meaning that this policy entry is given to all comers, as - * long as they match the signer field. The signer could be null, - * meaning the code is not signed.
- * - * The Permission contains the (Type, Name, Action) triplet.
- * - * For now, the Policy object retrieves the public key from the - * X.509 certificate on disk that corresponds to the signedBy - * alias specified in the Policy config file. For reasons of - * efficiency, the Policy object keeps a hashtable of certs already - * read in. This could be replaced by a secure internal key - * store. - * - *
- * For example, the entry
- *
-     *          permission java.io.File "/tmp", "read,write",
-     *          signedBy "Duke";
-     * 
- * is represented internally
- *
-     *
-     * FilePermission f = new FilePermission("/tmp", "read,write");
-     * PublicKey p = publickeys.get("Duke");
-     * URL u = InetAddress.getLocalHost();
-     * CodeBase c = new CodeBase( p, u );
-     * pe = new PolicyEntry(f, c);
-     * 
- * - * @author Marianne Mueller - * @author Roland Schemers - * @see java.security.CodeSource - * @see java.security.Policy - * @see java.security.Permissions - * @see java.security.ProtectionDomain - */ - private static class PolicyEntry { - - private final CodeSource codesource; - final List permissions; - private final List principals; - - /** - * Given a Permission and a CodeSource, create a policy entry. - * - * XXX Decide if/how to add validity fields and "purpose" fields to - * XXX policy entries - * - * @param cs the CodeSource, which encapsulates the URL and the - * public key - * attributes from the policy config file. Validity checks - * are performed on the public key before PolicyEntry is - * called. - * - */ - PolicyEntry(CodeSource cs, List principals) { - this.codesource = cs; - this.permissions = new ArrayList(); - this.principals = principals; // can be null - } - - PolicyEntry(CodeSource cs) { - this(cs, null); - } - - List getPrincipals() { - return principals; // can be null - } - - /** - * add a Permission object to this entry. - * No need to sync add op because perms are added to entry only - * while entry is being initialized - */ - void add(Permission p) { - permissions.add(p); - } - - /** - * Return the CodeSource for this policy entry - */ - CodeSource getCodeSource() { - return codesource; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("{"); - sb.append(getCodeSource()); - sb.append("\n"); - for (int j = 0; j < permissions.size(); j++) { - Permission p = permissions.get(j); - sb.append(" "); - sb.append(" "); - sb.append(p); - sb.append("\n"); - } - sb.append("}"); - sb.append("\n"); - return sb.toString(); - } - } - - private static class SelfPermission extends Permission { - - @java.io.Serial - private static final long serialVersionUID = -8315562579967246806L; - - /** - * The class name of the Permission class that will be - * created when this self permission is expanded . - * - * @serial - */ - private String type; - - /** - * The permission name. - * - * @serial - */ - private String name; - - /** - * The actions of the permission. - * - * @serial - */ - private String actions; - - /** - * The certs of the permission. - * - * @serial - */ - private Certificate[] certs; - - /** - * Creates a new SelfPermission containing the permission - * information needed later to expand the self - * @param type the class name of the Permission class that will be - * created when this permission is expanded and if necessary resolved. - * @param name the name of the permission. - * @param actions the actions of the permission. - * @param certs the certificates the permission's class was signed with. - * This is a list of certificate chains, where each chain is composed of - * a signer certificate and optionally its supporting certificate chain. - * Each chain is ordered bottom-to-top (i.e., with the signer - * certificate first and the (root) certificate authority last). - */ - public SelfPermission(String type, String name, String actions, Certificate[] certs) { - super(type); - if (type == null) { - throw new NullPointerException("Ttype cannot be null"); - } - this.type = type; - this.name = name; - this.actions = actions; - if (certs != null) { - // Extract the signer certs from the list of certificates. 
- for (int i = 0; i < certs.length; i++) { - if (!(certs[i] instanceof X509Certificate)) { - // there is no concept of signer certs, so we store the - // entire cert array - this.certs = certs.clone(); - break; - } - } - - if (this.certs == null) { - // Go through the list of certs and see if all the certs are - // signer certs. - int i = 0; - int count = 0; - while (i < certs.length) { - count++; - while (((i + 1) < certs.length) - && ((X509Certificate) certs[i]).getIssuerX500Principal() - .equals(((X509Certificate) certs[i + 1]).getSubjectX500Principal())) { - i++; - } - i++; - } - if (count == certs.length) { - // All the certs are signer certs, so we store the - // entire array - this.certs = certs.clone(); - } - - if (this.certs == null) { - // extract the signer certs - List signerCerts = new ArrayList<>(); - i = 0; - while (i < certs.length) { - signerCerts.add(certs[i]); - while (((i + 1) < certs.length) - && ((X509Certificate) certs[i]).getIssuerX500Principal() - .equals(((X509Certificate) certs[i + 1]).getSubjectX500Principal())) { - i++; - } - i++; - } - this.certs = new Certificate[signerCerts.size()]; - signerCerts.toArray(this.certs); - } - } - } - } - - /** - * This method always returns false for SelfPermission permissions. - * That is, an SelfPermission never considered to - * imply another permission. - * - * @param p the permission to check against. - * - * @return false. - */ - @Override - public boolean implies(Permission p) { - return false; - } - - /** - * Checks two SelfPermission objects for equality. - * - * Checks that obj is an SelfPermission, and has - * the same type (class) name, permission name, actions, and - * certificates as this object. - * - * @param obj the object we are testing for equality with this object. - * - * @return true if obj is an SelfPermission, and has the same - * type (class) name, permission name, actions, and - * certificates as this object. - */ - @Override - public boolean equals(Object obj) { - if (obj == this) return true; - - if (!(obj instanceof SelfPermission)) return false; - SelfPermission that = (SelfPermission) obj; - - if (!(this.type.equals(that.type) && this.name.equals(that.name) && this.actions.equals(that.actions))) return false; - - if ((this.certs == null) && (that.certs == null)) { - return true; - } - - if ((this.certs == null) || (that.certs == null)) { - return false; - } - - if (this.certs.length != that.certs.length) { - return false; - } - - int i, j; - boolean match; - - for (i = 0; i < this.certs.length; i++) { - match = false; - for (j = 0; j < that.certs.length; j++) { - if (this.certs[i].equals(that.certs[j])) { - match = true; - break; - } - } - if (!match) return false; - } - - for (i = 0; i < that.certs.length; i++) { - match = false; - for (j = 0; j < this.certs.length; j++) { - if (that.certs[i].equals(this.certs[j])) { - match = true; - break; - } - } - if (!match) return false; - } - return true; - } - - /** - * Returns the hash code value for this object. - * - * @return a hash code value for this object. - */ - @Override - public int hashCode() { - int hash = type.hashCode(); - if (name != null) hash ^= name.hashCode(); - if (actions != null) hash ^= actions.hashCode(); - return hash; - } - - /** - * Returns the canonical string representation of the actions, - * which currently is the empty string "", since there are no actions - * for an SelfPermission. 
That is, the actions for the - * permission that will be created when this SelfPermission - * is resolved may be non-null, but an SelfPermission - * itself is never considered to have any actions. - * - * @return the empty string "". - */ - @Override - public String getActions() { - return ""; - } - - public String getSelfType() { - return type; - } - - public String getSelfName() { - return name; - } - - public String getSelfActions() { - return actions; - } - - public Certificate[] getCerts() { - return (certs == null ? null : certs.clone()); - } - - /** - * Returns a string describing this SelfPermission. The convention - * is to specify the class name, the permission name, and the actions, - * in the following format: '(unresolved "ClassName" "name" "actions")'. - * - * @return information about this SelfPermission. - */ - @Override - public String toString() { - return "(SelfPermission " + type + " " + name + " " + actions + ")"; - } - - /** - * Restores the state of this object from the stream. - * - * @param stream the {@code ObjectInputStream} from which data is read - * @throws IOException if an I/O error occurs - * @throws ClassNotFoundException if a serialized class cannot be loaded - */ - @java.io.Serial - private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException { - stream.defaultReadObject(); - if (certs != null) { - this.certs = certs.clone(); - } - } - } - - /** - * holds policy information that we need to synch on - */ - private static class PolicyInfo { - // Stores grant entries in the policy - final List policyEntries; - - // Maps aliases to certs - final Map aliasMapping; - - PolicyInfo(int numCaches) { - policyEntries = new ArrayList<>(); - aliasMapping = Collections.synchronizedMap(new HashMap<>(11)); - } - } - - @SuppressWarnings("deprecation") - private static URL newURL(String spec) throws MalformedURLException { - return new URL(spec); - } -} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java deleted file mode 100644 index 9d5b0d5a13722..0000000000000 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java +++ /dev/null @@ -1,1163 +0,0 @@ -/* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.secure_sm.policy; - -import javax.security.auth.x500.X500Principal; - -import java.io.BufferedReader; -import java.io.BufferedWriter; -import java.io.File; -import java.io.IOException; -import java.io.PrintWriter; -import java.io.Reader; -import java.io.StreamTokenizer; -import java.io.Writer; -import java.security.GeneralSecurityException; -import java.security.Principal; -import java.util.Collection; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.Locale; -import java.util.Map; -import java.util.StringTokenizer; -import java.util.TreeMap; -import java.util.Vector; - -/** - * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/provider/PolicyParser.java - */ -public class PolicyParser { - - private final Vector grantEntries; - private Map domainEntries; - - private StreamTokenizer st; - private int lookahead; - private boolean expandProp = false; - private String keyStoreUrlString = null; // unexpanded - private String keyStoreType = null; - private String keyStoreProvider = null; - private String storePassURL = null; - - private String expand(String value) throws PropertyExpander.ExpandException { - return expand(value, false); - } - - private String expand(String value, boolean encodeURL) throws PropertyExpander.ExpandException { - if (!expandProp) { - return value; - } else { - return PropertyExpander.expand(value, encodeURL); - } - } - - /** - * Creates a PolicyParser object. - */ - - public PolicyParser() { - grantEntries = new Vector<>(); - } - - public PolicyParser(boolean expandProp) { - this(); - this.expandProp = expandProp; - } - - /** - * Reads a policy configuration into the Policy object using a - * Reader object. - * - * @param policy the policy Reader object. - * - * @exception ParsingException if the policy configuration contains - * a syntax error. - * - * @exception IOException if an error occurs while reading the policy - * configuration. - */ - - public void read(Reader policy) throws ParsingException, IOException { - if (!(policy instanceof BufferedReader)) { - policy = new BufferedReader(policy); - } - - /* - * Configure the stream tokenizer: - * Recognize strings between "..." - * Don't convert words to lowercase - * Recognize both C-style and C++-style comments - * Treat end-of-line as white space, not as a token - */ - st = new StreamTokenizer(policy); - - st.resetSyntax(); - st.wordChars('a', 'z'); - st.wordChars('A', 'Z'); - st.wordChars('.', '.'); - st.wordChars('0', '9'); - st.wordChars('_', '_'); - st.wordChars('$', '$'); - st.wordChars(128 + 32, 255); - st.whitespaceChars(0, ' '); - st.commentChar('/'); - st.quoteChar('\''); - st.quoteChar('"'); - st.lowerCaseMode(false); - st.ordinaryChar('/'); - st.slashSlashComments(true); - st.slashStarComments(true); - - /* - * The main parsing loop. The loop is executed once - * for each entry in the config file. The entries - * are delimited by semicolons. Once we've read in - * the information for an entry, go ahead and try to - * add it to the policy vector. 
- * - */ - - lookahead = st.nextToken(); - GrantEntry ge = null; - while (lookahead != StreamTokenizer.TT_EOF) { - if (peek("grant")) { - ge = parseGrantEntry(); - // could be null if we couldn't expand a property - if (ge != null) add(ge); - } else if (peek("keystore") && keyStoreUrlString == null) { - // only one keystore entry per policy file, others will be - // ignored - parseKeyStoreEntry(); - } else if (peek("keystorePasswordURL") && storePassURL == null) { - // only one keystore passwordURL per policy file, others will be - // ignored - parseStorePassURL(); - } else if (ge == null && keyStoreUrlString == null && storePassURL == null && peek("domain")) { - if (domainEntries == null) { - domainEntries = new TreeMap<>(); - } - DomainEntry de = parseDomainEntry(); - String domainName = de.getName(); - if (domainEntries.putIfAbsent(domainName, de) != null) { - Object[] source = { domainName }; - String msg = "duplicate keystore domain name: " + domainName; - throw new ParsingException(msg, source); - } - } else { - // error? - } - match(";"); - } - - if (keyStoreUrlString == null && storePassURL != null) { - throw new ParsingException("Keystore Password URL cannot be specified without also specifying keystore"); - } - } - - public void add(GrantEntry ge) { - grantEntries.addElement(ge); - } - - public void replace(GrantEntry origGe, GrantEntry newGe) { - grantEntries.setElementAt(newGe, grantEntries.indexOf(origGe)); - } - - public boolean remove(GrantEntry ge) { - return grantEntries.removeElement(ge); - } - - /** - * Returns the (possibly expanded) keystore location, or null if the - * expansion fails. - */ - public String getKeyStoreUrl() { - try { - if (keyStoreUrlString != null && keyStoreUrlString.length() != 0) { - return expand(keyStoreUrlString, true).replace(File.separatorChar, '/'); - } - } catch (PropertyExpander.ExpandException peee) { - return null; - } - return null; - } - - public void setKeyStoreUrl(String url) { - keyStoreUrlString = url; - } - - public String getKeyStoreType() { - return keyStoreType; - } - - public void setKeyStoreType(String type) { - keyStoreType = type; - } - - public String getKeyStoreProvider() { - return keyStoreProvider; - } - - public void setKeyStoreProvider(String provider) { - keyStoreProvider = provider; - } - - public String getStorePassURL() { - try { - if (storePassURL != null && storePassURL.length() != 0) { - return expand(storePassURL, true).replace(File.separatorChar, '/'); - } - } catch (PropertyExpander.ExpandException peee) { - return null; - } - return null; - } - - public void setStorePassURL(String storePassURL) { - this.storePassURL = storePassURL; - } - - /** - * Enumerate all the entries in the global policy object. - * This method is used by policy admin tools. The tools - * should use the Enumeration methods on the returned object - * to fetch the elements sequentially. 
- */ - public Enumeration grantElements() { - return grantEntries.elements(); - } - - public Collection getDomainEntries() { - return domainEntries.values(); - } - - /** - * write out the policy - */ - - public void write(Writer policy) { - PrintWriter out = new PrintWriter(new BufferedWriter(policy)); - - out.println("/* AUTOMATICALLY GENERATED ON " + (new java.util.Date()) + "*/"); - out.println("/* DO NOT EDIT */"); - out.println(); - - // write the (unexpanded) keystore entry as the first entry of the - // policy file - if (keyStoreUrlString != null) { - writeKeyStoreEntry(out); - } - if (storePassURL != null) { - writeStorePassURL(out); - } - - // write "grant" entries - for (GrantEntry ge : grantEntries) { - ge.write(out); - out.println(); - } - out.flush(); - } - - /** - * parses a keystore entry - */ - private void parseKeyStoreEntry() throws ParsingException, IOException { - match("keystore"); - keyStoreUrlString = match("quoted string"); - - // parse keystore type - if (!peek(",")) { - return; // default type - } - match(","); - - if (peek("\"")) { - keyStoreType = match("quoted string"); - } else { - throw new ParsingException(st.lineno(), "Expected keystore type"); - } - - // parse keystore provider - if (!peek(",")) { - return; // provider optional - } - match(","); - - if (peek("\"")) { - keyStoreProvider = match("quoted string"); - } else { - throw new ParsingException(st.lineno(), "Keystore provider expected"); - } - } - - private void parseStorePassURL() throws ParsingException, IOException { - match("keyStorePasswordURL"); - storePassURL = match("quoted string"); - } - - /** - * writes the (unexpanded) keystore entry - */ - private void writeKeyStoreEntry(PrintWriter out) { - out.print("keystore \""); - out.print(keyStoreUrlString); - out.print('"'); - if (keyStoreType != null && !keyStoreType.isEmpty()) out.print(", \"" + keyStoreType + "\""); - if (keyStoreProvider != null && !keyStoreProvider.isEmpty()) out.print(", \"" + keyStoreProvider + "\""); - out.println(";"); - out.println(); - } - - private void writeStorePassURL(PrintWriter out) { - out.print("keystorePasswordURL \""); - out.print(storePassURL); - out.print('"'); - out.println(";"); - out.println(); - } - - /** - * parse a Grant entry - */ - private GrantEntry parseGrantEntry() throws ParsingException, IOException { - GrantEntry e = new GrantEntry(); - LinkedList principals = null; - boolean ignoreEntry = false; - - match("grant"); - - while (!peek("{")) { - - if (peekAndMatch("Codebase")) { - if (e.codeBase != null) throw new ParsingException(st.lineno(), "Multiple Codebase expressions"); - e.codeBase = match("quoted string"); - peekAndMatch(","); - } else if (peekAndMatch("SignedBy")) { - if (e.signedBy != null) throw new ParsingException(st.lineno(), "Multiple SignedBy expressions"); - e.signedBy = match("quoted string"); - - // verify syntax of the aliases - StringTokenizer aliases = new StringTokenizer(e.signedBy, ",", true); - int actr = 0; - int cctr = 0; - while (aliases.hasMoreTokens()) { - String alias = aliases.nextToken().trim(); - if (alias.equals(",")) cctr++; - else if (!alias.isEmpty()) actr++; - } - if (actr <= cctr) throw new ParsingException(st.lineno(), "SignedBy has an empty alias"); - - peekAndMatch(","); - } else if (peekAndMatch("Principal")) { - if (principals == null) { - principals = new LinkedList<>(); - } - - String principalClass; - String principalName; - - if (peek("\"")) { - // both the principalClass and principalName - // will be replaced later - principalClass = 
PrincipalEntry.REPLACE_NAME; - principalName = match("principal type"); - } else { - // check for principalClass wildcard - if (peek("*")) { - match("*"); - principalClass = PrincipalEntry.WILDCARD_CLASS; - } else { - principalClass = match("principal type"); - } - - // check for principalName wildcard - if (peek("*")) { - match("*"); - principalName = PrincipalEntry.WILDCARD_NAME; - } else { - principalName = match("quoted string"); - } - - // disallow WILDCARD_CLASS && actual name - if (principalClass.equals(PrincipalEntry.WILDCARD_CLASS) && !principalName.equals(PrincipalEntry.WILDCARD_NAME)) { - throw new ParsingException(st.lineno(), "Cannot specify Principal with a wildcard class without a wildcard name"); - } - } - - try { - principalName = expand(principalName); - - if (principalClass.equals("javax.security.auth.x500.X500Principal") - && !principalName.equals(PrincipalEntry.WILDCARD_NAME)) { - - // 4702543: X500 names with an EmailAddress - // were encoded incorrectly. construct a new - // X500Principal with correct encoding. - - X500Principal p = new X500Principal((new X500Principal(principalName)).toString()); - principalName = p.getName(); - } - - principals.add(new PrincipalEntry(principalClass, principalName)); - } catch (PropertyExpander.ExpandException peee) { - ignoreEntry = true; - } - peekAndMatch(","); - - } else { - throw new ParsingException(st.lineno(), "Expected codeBase or SignedBy or Principal"); - } - } - - if (principals != null) e.principals = principals; - match("{"); - - while (!peek("}")) { - if (peek("Permission")) { - try { - PermissionEntry pe = parsePermissionEntry(); - e.add(pe); - } catch (PropertyExpander.ExpandException peee) { - skipEntry(); // BugId 4219343 - } - match(";"); - } else { - throw new ParsingException(st.lineno(), "Expected permission entry"); - } - } - match("}"); - - try { - if (e.signedBy != null) e.signedBy = expand(e.signedBy); - if (e.codeBase != null) { - e.codeBase = expand(e.codeBase, true).replace(File.separatorChar, '/'); - } - } catch (PropertyExpander.ExpandException peee) { - return null; - } - - return (ignoreEntry) ? 
null : e; - } - - /** - * parse a Permission entry - */ - private PermissionEntry parsePermissionEntry() throws ParsingException, IOException, PropertyExpander.ExpandException { - PermissionEntry e = new PermissionEntry(); - - // Permission - match("Permission"); - e.permission = match("permission type"); - - if (peek("\"")) { - // Permission name - e.name = expand(match("quoted string")); - } - - if (!peek(",")) { - return e; - } - match(","); - - if (peek("\"")) { - e.action = expand(match("quoted string")); - if (!peek(",")) { - return e; - } - match(","); - } - - if (peekAndMatch("SignedBy")) { - e.signedBy = expand(match("quoted string")); - } - return e; - } - - /** - * parse a domain entry - */ - private DomainEntry parseDomainEntry() throws ParsingException, IOException { - DomainEntry domainEntry; - String name; - Map properties = new HashMap<>(); - - match("domain"); - name = match("domain name"); - - while (!peek("{")) { - // get the domain properties - properties = parseProperties("{"); - } - match("{"); - domainEntry = new DomainEntry(name, properties); - - while (!peek("}")) { - - match("keystore"); - name = match("keystore name"); - // get the keystore properties - if (!peek("}")) { - properties = parseProperties(";"); - } - match(";"); - domainEntry.add(new KeyStoreEntry(name, properties)); - } - match("}"); - - return domainEntry; - } - - /* - * Return a collection of domain properties or keystore properties. - */ - private Map parseProperties(String terminator) throws ParsingException, IOException { - - Map properties = new HashMap<>(); - String key; - String value; - while (!peek(terminator)) { - key = match("property name"); - match("="); - - try { - value = expand(match("quoted string")); - } catch (PropertyExpander.ExpandException peee) { - throw new IOException(peee.getLocalizedMessage()); - } - properties.put(key.toLowerCase(Locale.ENGLISH), value); - } - - return properties; - } - - private boolean peekAndMatch(String expect) throws ParsingException, IOException { - if (peek(expect)) { - match(expect); - return true; - } else { - return false; - } - } - - private boolean peek(String expect) { - boolean found = false; - - switch (lookahead) { - - case StreamTokenizer.TT_WORD: - if (expect.equalsIgnoreCase(st.sval)) found = true; - break; - case ',': - if (expect.equalsIgnoreCase(",")) found = true; - break; - case '{': - if (expect.equalsIgnoreCase("{")) found = true; - break; - case '}': - if (expect.equalsIgnoreCase("}")) found = true; - break; - case '"': - if (expect.equalsIgnoreCase("\"")) found = true; - break; - case '*': - if (expect.equalsIgnoreCase("*")) found = true; - break; - case ';': - if (expect.equalsIgnoreCase(";")) found = true; - break; - default: - - } - return found; - } - - private String match(String expect) throws ParsingException, IOException { - String value = null; - - switch (lookahead) { - case StreamTokenizer.TT_NUMBER: - throw new ParsingException(st.lineno(), expect); - case StreamTokenizer.TT_EOF: - Object[] source = { expect }; - String msg = "expected [" + expect + "], read [end of file]"; - throw new ParsingException(msg, source); - case StreamTokenizer.TT_WORD: - if (expect.equalsIgnoreCase(st.sval)) { - lookahead = st.nextToken(); - } else if (expect.equalsIgnoreCase("permission type")) { - value = st.sval; - lookahead = st.nextToken(); - } else if (expect.equalsIgnoreCase("principal type")) { - value = st.sval; - lookahead = st.nextToken(); - } else if (expect.equalsIgnoreCase("domain name") - || 
expect.equalsIgnoreCase("keystore name") - || expect.equalsIgnoreCase("property name")) { - value = st.sval; - lookahead = st.nextToken(); - } else { - throw new ParsingException(st.lineno(), expect, st.sval); - } - break; - case '"': - if (expect.equalsIgnoreCase("quoted string")) { - value = st.sval; - lookahead = st.nextToken(); - } else if (expect.equalsIgnoreCase("permission type")) { - value = st.sval; - lookahead = st.nextToken(); - } else if (expect.equalsIgnoreCase("principal type")) { - value = st.sval; - lookahead = st.nextToken(); - } else { - throw new ParsingException(st.lineno(), expect, st.sval); - } - break; - case ',': - if (expect.equalsIgnoreCase(",")) lookahead = st.nextToken(); - else throw new ParsingException(st.lineno(), expect, ","); - break; - case '{': - if (expect.equalsIgnoreCase("{")) lookahead = st.nextToken(); - else throw new ParsingException(st.lineno(), expect, "{"); - break; - case '}': - if (expect.equalsIgnoreCase("}")) lookahead = st.nextToken(); - else throw new ParsingException(st.lineno(), expect, "}"); - break; - case ';': - if (expect.equalsIgnoreCase(";")) lookahead = st.nextToken(); - else throw new ParsingException(st.lineno(), expect, ";"); - break; - case '*': - if (expect.equalsIgnoreCase("*")) lookahead = st.nextToken(); - else throw new ParsingException(st.lineno(), expect, "*"); - break; - case '=': - if (expect.equalsIgnoreCase("=")) lookahead = st.nextToken(); - else throw new ParsingException(st.lineno(), expect, "="); - break; - default: - throw new ParsingException(st.lineno(), expect, String.valueOf((char) lookahead)); - } - return value; - } - - /** - * skip all tokens for this entry leaving the delimiter ";" - * in the stream. - */ - private void skipEntry() throws ParsingException, IOException { - while (lookahead != ';') { - switch (lookahead) { - case StreamTokenizer.TT_NUMBER: - throw new ParsingException(st.lineno(), ";"); - case StreamTokenizer.TT_EOF: - throw new ParsingException("Expected read end of file"); - default: - lookahead = st.nextToken(); - } - } - } - - /** - * Each grant entry in the policy configuration file is - * represented by a GrantEntry object. - * - *
- * For example, the entry
- *
-     *      grant signedBy "Duke" {
-     *          permission java.io.FilePermission "/tmp", "read,write";
-     *      };
-     *
-     * 
- * is represented internally
- *
-     *
-     * pe = new PermissionEntry("java.io.FilePermission",
-     *                           "/tmp", "read,write");
-     *
-     * ge = new GrantEntry("Duke", null);
-     *
-     * ge.add(pe);
-     *
-     * 
- * - * @author Roland Schemers - * - * version 1.19, 05/21/98 - */ - - public static class GrantEntry { - - public String signedBy; - public String codeBase; - public LinkedList principals; - public Vector permissionEntries; - - public GrantEntry() { - principals = new LinkedList<>(); - permissionEntries = new Vector<>(); - } - - public GrantEntry(String signedBy, String codeBase) { - this.codeBase = codeBase; - this.signedBy = signedBy; - principals = new LinkedList<>(); - permissionEntries = new Vector<>(); - } - - public void add(PermissionEntry pe) { - permissionEntries.addElement(pe); - } - - public boolean remove(PrincipalEntry pe) { - return principals.remove(pe); - } - - public boolean remove(PermissionEntry pe) { - return permissionEntries.removeElement(pe); - } - - public boolean contains(PrincipalEntry pe) { - return principals.contains(pe); - } - - public boolean contains(PermissionEntry pe) { - return permissionEntries.contains(pe); - } - - /** - * Enumerate all the permission entries in this GrantEntry. - */ - public Enumeration permissionElements() { - return permissionEntries.elements(); - } - - public void write(PrintWriter out) { - out.print("grant"); - if (signedBy != null) { - out.print(" signedBy \""); - out.print(signedBy); - out.print('"'); - if (codeBase != null) out.print(", "); - } - if (codeBase != null) { - out.print(" codeBase \""); - out.print(codeBase); - out.print('"'); - if (principals != null && principals.size() > 0) out.print(",\n"); - } - if (principals != null && principals.size() > 0) { - Iterator pli = principals.iterator(); - while (pli.hasNext()) { - out.print(" "); - PrincipalEntry pe = pli.next(); - pe.write(out); - if (pli.hasNext()) out.print(",\n"); - } - } - out.println(" {"); - for (PermissionEntry pe : permissionEntries) { - out.write(" "); - pe.write(out); - } - out.println("};"); - } - - public Object clone() { - GrantEntry ge = new GrantEntry(); - ge.codeBase = this.codeBase; - ge.signedBy = this.signedBy; - ge.principals = new LinkedList<>(this.principals); - ge.permissionEntries = new Vector<>(this.permissionEntries); - return ge; - } - } - - /** - * Principal info (class and name) in a grant entry - */ - public static class PrincipalEntry implements Principal { - - public static final String WILDCARD_CLASS = "WILDCARD_PRINCIPAL_CLASS"; - public static final String WILDCARD_NAME = "WILDCARD_PRINCIPAL_NAME"; - public static final String REPLACE_NAME = "PolicyParser.REPLACE_NAME"; - - String principalClass; - String principalName; - - /** - * A PrincipalEntry consists of the Principal class and Principal name. 
- * - * @param principalClass the Principal class - * @param principalName the Principal name - * @throws NullPointerException if principalClass or principalName - * are null - */ - public PrincipalEntry(String principalClass, String principalName) { - if (principalClass == null || principalName == null) throw new NullPointerException("principalClass or principalName is null"); - this.principalClass = principalClass; - this.principalName = principalName; - } - - boolean isWildcardName() { - return principalName.equals(WILDCARD_NAME); - } - - boolean isWildcardClass() { - return principalClass.equals(WILDCARD_CLASS); - } - - boolean isReplaceName() { - return principalClass.equals(REPLACE_NAME); - } - - public String getPrincipalClass() { - return principalClass; - } - - public String getPrincipalName() { - return principalName; - } - - public String getDisplayClass() { - if (isWildcardClass()) { - return "*"; - } else if (isReplaceName()) { - return ""; - } else return principalClass; - } - - public String getDisplayName() { - return getDisplayName(false); - } - - public String getDisplayName(boolean addQuote) { - if (isWildcardName()) { - return "*"; - } else { - if (addQuote) return "\"" + principalName + "\""; - else return principalName; - } - } - - @Override - public String getName() { - return principalName; - } - - @Override - public String toString() { - if (!isReplaceName()) { - return getDisplayClass() + "/" + getDisplayName(); - } else { - return getDisplayName(); - } - } - - /** - * Test for equality between the specified object and this object. - * Two PrincipalEntries are equal if their class and name values - * are equal. - * - * @param obj the object to test for equality with this object - * @return true if the objects are equal, false otherwise - */ - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - - if (!(obj instanceof PrincipalEntry that)) return false; - - return (principalClass.equals(that.principalClass) && principalName.equals(that.principalName)); - } - - /** - * Return a hashcode for this PrincipalEntry. - * - * @return a hashcode for this PrincipalEntry - */ - @Override - public int hashCode() { - return principalClass.hashCode(); - } - - public void write(PrintWriter out) { - out.print("principal " + getDisplayClass() + " " + getDisplayName(true)); - } - } - - /** - * Each permission entry in the policy configuration file is - * represented by a - * PermissionEntry object. - * - *

- * For example, the entry - * <pre>
-     *          permission java.io.FilePermission "/tmp", "read,write";
-     * </pre>
- * is represented internally - * <pre>
-     *
-     * pe = new PermissionEntry("java.io.FilePermission",
-     *                           "/tmp", "read,write");
-     * </pre>
- * - * @author Roland Schemers - * - * version 1.19, 05/21/98 - */ - - public static class PermissionEntry { - - public String permission; - public String name; - public String action; - public String signedBy; - - public PermissionEntry() {} - - public PermissionEntry(String permission, String name, String action) { - this.permission = permission; - this.name = name; - this.action = action; - } - - /** - * Calculates a hash code value for the object. Objects - * which are equal will also have the same hashcode. - */ - @Override - public int hashCode() { - int retval = permission.hashCode(); - if (name != null) retval ^= name.hashCode(); - if (action != null) retval ^= action.hashCode(); - return retval; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) return true; - - if (!(obj instanceof PermissionEntry that)) return false; - - if (this.permission == null) { - if (that.permission != null) return false; - } else { - if (!this.permission.equals(that.permission)) return false; - } - - if (this.name == null) { - if (that.name != null) return false; - } else { - if (!this.name.equals(that.name)) return false; - } - - if (this.action == null) { - if (that.action != null) return false; - } else { - if (!this.action.equals(that.action)) return false; - } - - if (this.signedBy == null) { - return that.signedBy == null; - } else { - return this.signedBy.equals(that.signedBy); - } - } - - public void write(PrintWriter out) { - out.print("permission "); - out.print(permission); - if (name != null) { - out.print(" \""); - - // ATTENTION: regex with double escaping, - // the normal forms look like: - // $name =~ s/\\/\\\\/g; and - // $name =~ s/\"/\\\"/g; - // and then in a java string, it's escaped again - - out.print(name.replaceAll("\\\\", "\\\\\\\\").replaceAll("\"", "\\\\\\\"")); - out.print('"'); - } - if (action != null) { - out.print(", \""); - out.print(action); - out.print('"'); - } - if (signedBy != null) { - out.print(", signedBy \""); - out.print(signedBy); - out.print('"'); - } - out.println(";"); - } - } - - /** - * Each domain entry in the keystore domain configuration file is - * represented by a DomainEntry object. - */ - static class DomainEntry { - private final String name; - private final Map properties; - private final Map entries; - - DomainEntry(String name, Map properties) { - this.name = name; - this.properties = properties; - entries = new HashMap<>(); - } - - String getName() { - return name; - } - - Map getProperties() { - return properties; - } - - Collection getEntries() { - return entries.values(); - } - - void add(KeyStoreEntry entry) throws ParsingException { - String keystoreName = entry.getName(); - if (!entries.containsKey(keystoreName)) { - entries.put(keystoreName, entry); - } else { - Object[] source = { keystoreName }; - String msg = "duplicate keystore name: " + keystoreName; - throw new ParsingException(msg, source); - } - } - - @Override - public String toString() { - StringBuilder s = new StringBuilder("\ndomain ").append(name); - - if (properties != null) { - for (Map.Entry property : properties.entrySet()) { - s.append("\n ").append(property.getKey()).append('=').append(property.getValue()); - } - } - s.append(" {\n"); - - for (KeyStoreEntry entry : entries.values()) { - s.append(entry).append("\n"); - } - s.append("}"); - - return s.toString(); - } - } - - /** - * Each keystore entry in the keystore domain configuration file is - * represented by a KeyStoreEntry object. 
- */ - - static class KeyStoreEntry { - private final String name; - private final Map properties; - - KeyStoreEntry(String name, Map properties) { - this.name = name; - this.properties = properties; - } - - String getName() { - return name; - } - - Map getProperties() { - return properties; - } - - @Override - public String toString() { - StringBuilder s = new StringBuilder("\n keystore ").append(name); - if (properties != null) { - for (Map.Entry property : properties.entrySet()) { - s.append("\n ").append(property.getKey()).append('=').append(property.getValue()); - } - } - s.append(";"); - - return s.toString(); - } - } - - public static class ParsingException extends GeneralSecurityException { - - @java.io.Serial - private static final long serialVersionUID = -4330692689482574072L; - - @SuppressWarnings("serial") // Not statically typed as Serializable - private Object[] source; - - /** - * Constructs a ParsingException with the specified - * detail message. A detail message is a String that describes - * this particular exception, which may, for example, specify which - * algorithm is not available. - * - * @param msg the detail message. - */ - public ParsingException(String msg) { - super(msg); - } - - public ParsingException(String msg, Object[] source) { - super(msg); - this.source = source; - } - - public ParsingException(int line, String msg) { - super("line " + line + ": " + msg); - source = new Object[] { line, msg }; - } - - public ParsingException(int line, String expect, String actual) { - super("line " + line + ": expected [" + expect + "], found [" + actual + "]"); - source = new Object[] { line, expect, actual }; - } - } -} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyUtil.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyUtil.java deleted file mode 100644 index ed19379b697c0..0000000000000 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyUtil.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.secure_sm.policy; - -import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.MalformedURLException; -import java.net.URL; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.util.Arrays; - -/** - * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/util/PolicyUtil.java - */ -public class PolicyUtil { - - // standard PKCS11 KeyStore type - private static final String P11KEYSTORE = "PKCS11"; - - // reserved word - private static final String NONE = "NONE"; - - /* - * Fast path reading from file urls in order to avoid calling - * FileURLConnection.connect() which can be quite slow the first time - * it is called. We really should clean up FileURLConnection so that - * this is not a problem but in the meantime this fix helps reduce - * start up time noticeably for the new launcher. -- DAC - */ - public static InputStream getInputStream(URL url) throws IOException { - if ("file".equals(url.getProtocol())) { - String path = url.getFile().replace('/', File.separatorChar); - path = ParseUtil.decode(path); - return new FileInputStream(path); - } else { - return url.openStream(); - } - } - - /** - * this is intended for use by the policy parser to - * instantiate a KeyStore from the information in the GUI/policy file - */ - public static KeyStore getKeyStore( - URL policyUrl, // URL of policy file - String keyStoreName, // input: keyStore URL - String keyStoreType, // input: keyStore type - String keyStoreProvider, // input: keyStore provider - String storePassURL // input: keyStore password - ) throws KeyStoreException, IOException, NoSuchProviderException, NoSuchAlgorithmException, java.security.cert.CertificateException { - - if (keyStoreName == null) { - throw new IllegalArgumentException("null KeyStore name"); - } - - char[] keyStorePassword = null; - try { - KeyStore ks; - if (keyStoreType == null) { - keyStoreType = KeyStore.getDefaultType(); - } - - if (P11KEYSTORE.equalsIgnoreCase(keyStoreType) && !NONE.equals(keyStoreName)) { - throw new IllegalArgumentException( - "Invalid value (" - + keyStoreName - + ") for keystore URL. 
If the keystore type is \"" - + P11KEYSTORE - + "\", the keystore url must be \"" - + NONE - + "\"" - ); - } - - if (keyStoreProvider != null) { - ks = KeyStore.getInstance(keyStoreType, keyStoreProvider); - } else { - ks = KeyStore.getInstance(keyStoreType); - } - - if (storePassURL != null) { - URL passURL; - try { - @SuppressWarnings("deprecation") - var _unused = passURL = new URL(storePassURL); - // absolute URL - } catch (MalformedURLException e) { - // relative URL - if (policyUrl == null) { - throw e; - } - @SuppressWarnings("deprecation") - var _unused = passURL = new URL(policyUrl, storePassURL); - } - - try (InputStream in = passURL.openStream()) { - keyStorePassword = Password.readPassword(in); - } - } - - if (NONE.equals(keyStoreName)) { - ks.load(null, keyStorePassword); - } else { - /* - * location of keystore is specified as absolute URL in policy - * file, or is relative to URL of policy file - */ - URL keyStoreUrl; - try { - @SuppressWarnings("deprecation") - var _unused = keyStoreUrl = new URL(keyStoreName); - // absolute URL - } catch (MalformedURLException e) { - // relative URL - if (policyUrl == null) { - throw e; - } - @SuppressWarnings("deprecation") - var _unused = keyStoreUrl = new URL(policyUrl, keyStoreName); - } - - try (InputStream inStream = new BufferedInputStream(getInputStream(keyStoreUrl))) { - ks.load(inStream, keyStorePassword); - } - } - return ks; - } finally { - if (keyStorePassword != null) { - Arrays.fill(keyStorePassword, ' '); - } - } - } -} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java deleted file mode 100644 index 759822b0ef2b5..0000000000000 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.secure_sm.policy; - -import java.net.URI; -import java.net.URISyntaxException; -import java.security.GeneralSecurityException; - -/** - * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/util/PropertyExpander.java - */ -public class PropertyExpander { - - public static class ExpandException extends GeneralSecurityException { - private static final long serialVersionUID = -1L; - - public ExpandException(String msg) { - super(msg); - } - } - - public static String expand(String value) throws ExpandException { - return expand(value, false); - } - - public static String expand(String value, boolean encodeURL) throws ExpandException { - if (value == null) return null; - - int p = value.indexOf("${"); - - // no special characters - if (p == -1) return value; - - StringBuilder sb = new StringBuilder(value.length()); - int max = value.length(); - int i = 0; // index of last character we copied - - scanner: while (p < max) { - if (p > i) { - // copy in anything before the special stuff - sb.append(value.substring(i, p)); - } - int pe = p + 2; - - // do not expand ${{ ... }} - if (pe < max && value.charAt(pe) == '{') { - pe = value.indexOf("}}", pe); - if (pe == -1 || pe + 2 == max) { - // append remaining chars - sb.append(value.substring(p)); - break scanner; - } else { - // append as normal text - pe++; - sb.append(value.substring(p, pe + 1)); - } - } else { - while ((pe < max) && (value.charAt(pe) != '}')) { - pe++; - } - if (pe == max) { - // no matching '}' found, just add in as normal text - sb.append(value.substring(p, pe)); - break scanner; - } - String prop = value.substring(p + 2, pe); - if (prop.equals("/")) { - sb.append(java.io.File.separatorChar); - } else { - String val = System.getProperty(prop); - if (val != null) { - if (encodeURL) { - // encode 'val' unless it's an absolute URI - // at the beginning of the string buffer - try { - if (sb.length() > 0 || !(new URI(val)).isAbsolute()) { - val = ParseUtil.encodePath(val); - } - } catch (URISyntaxException use) { - val = ParseUtil.encodePath(val); - } - } - sb.append(val); - } else { - throw new ExpandException("unable to expand property " + prop); - } - } - } - i = pe + 1; - p = value.indexOf("${", i); - if (p == -1) { - // no more to expand. copy in any extra - if (i < max) { - sb.append(value.substring(i, max)); - } - // break out of loop - break scanner; - } - } - return sb.toString(); - } -} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/SecurityConstants.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/SecurityConstants.java deleted file mode 100644 index 39e8efd87868c..0000000000000 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/SecurityConstants.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.secure_sm.policy; - -import java.lang.reflect.ReflectPermission; -import java.net.NetPermission; -import java.net.SocketPermission; -import java.security.AllPermission; -import java.security.SecurityPermission; - -/** - * Adapted from: https://github.com/openjdk/jdk23u/blob/master/src/java.base/share/classes/sun/security/util/SecurityConstants.java - */ -public final class SecurityConstants { - // Cannot create one of these - private SecurityConstants() {} - - // Commonly used string constants for permission actions used by - // SecurityManager. Declare here for shortcut when checking permissions - // in FilePermission, SocketPermission, and PropertyPermission. - - public static final String FILE_DELETE_ACTION = "delete"; - public static final String FILE_EXECUTE_ACTION = "execute"; - public static final String FILE_READ_ACTION = "read"; - public static final String FILE_WRITE_ACTION = "write"; - public static final String FILE_READLINK_ACTION = "readlink"; - - public static final String SOCKET_RESOLVE_ACTION = "resolve"; - public static final String SOCKET_CONNECT_ACTION = "connect"; - public static final String SOCKET_LISTEN_ACTION = "listen"; - public static final String SOCKET_ACCEPT_ACTION = "accept"; - public static final String SOCKET_CONNECT_ACCEPT_ACTION = "connect,accept"; - - public static final String PROPERTY_RW_ACTION = "read,write"; - public static final String PROPERTY_READ_ACTION = "read"; - public static final String PROPERTY_WRITE_ACTION = "write"; - - // Permission constants used in the various checkPermission() calls in JDK. 
- - // java.lang.Class, java.lang.SecurityManager, java.lang.System, - // java.net.URLConnection, java.security.AllPermission, java.security.Policy, - // sun.security.provider.PolicyFile - public static final AllPermission ALL_PERMISSION = new AllPermission(); - - // java.net.URL - public static final NetPermission SPECIFY_HANDLER_PERMISSION = new NetPermission("specifyStreamHandler"); - - // java.net.ProxySelector - public static final NetPermission SET_PROXYSELECTOR_PERMISSION = new NetPermission("setProxySelector"); - - // java.net.ProxySelector - public static final NetPermission GET_PROXYSELECTOR_PERMISSION = new NetPermission("getProxySelector"); - - // java.net.CookieHandler - public static final NetPermission SET_COOKIEHANDLER_PERMISSION = new NetPermission("setCookieHandler"); - - // java.net.CookieHandler - public static final NetPermission GET_COOKIEHANDLER_PERMISSION = new NetPermission("getCookieHandler"); - - // java.net.ResponseCache - public static final NetPermission SET_RESPONSECACHE_PERMISSION = new NetPermission("setResponseCache"); - - // java.net.ResponseCache - public static final NetPermission GET_RESPONSECACHE_PERMISSION = new NetPermission("getResponseCache"); - - // java.net.ServerSocket, java.net.Socket - public static final NetPermission SET_SOCKETIMPL_PERMISSION = new NetPermission("setSocketImpl"); - - // java.lang.SecurityManager, sun.applet.AppletPanel - public static final RuntimePermission CREATE_CLASSLOADER_PERMISSION = new RuntimePermission("createClassLoader"); - - // java.lang.SecurityManager - public static final RuntimePermission CHECK_MEMBER_ACCESS_PERMISSION = new RuntimePermission("accessDeclaredMembers"); - - // java.lang.SecurityManager, sun.applet.AppletSecurity - public static final RuntimePermission MODIFY_THREAD_PERMISSION = new RuntimePermission("modifyThread"); - - // java.lang.SecurityManager, sun.applet.AppletSecurity - public static final RuntimePermission MODIFY_THREADGROUP_PERMISSION = new RuntimePermission("modifyThreadGroup"); - - // java.lang.Class - public static final RuntimePermission GET_PD_PERMISSION = new RuntimePermission("getProtectionDomain"); - - // java.lang.Class, java.lang.ClassLoader, java.lang.Thread - public static final RuntimePermission GET_CLASSLOADER_PERMISSION = new RuntimePermission("getClassLoader"); - - // java.lang.Thread - public static final RuntimePermission STOP_THREAD_PERMISSION = new RuntimePermission("stopThread"); - - // java.lang.Thread - public static final RuntimePermission GET_STACK_TRACE_PERMISSION = new RuntimePermission("getStackTrace"); - - // java.lang.Thread - public static final RuntimePermission SUBCLASS_IMPLEMENTATION_PERMISSION = new RuntimePermission("enableContextClassLoaderOverride"); - - // java.security.AccessControlContext - public static final SecurityPermission CREATE_ACC_PERMISSION = new SecurityPermission("createAccessControlContext"); - - // java.security.AccessControlContext - public static final SecurityPermission GET_COMBINER_PERMISSION = new SecurityPermission("getDomainCombiner"); - - // java.security.Policy, java.security.ProtectionDomain - public static final SecurityPermission GET_POLICY_PERMISSION = new SecurityPermission("getPolicy"); - - // java.lang.SecurityManager - public static final SocketPermission LOCAL_LISTEN_PERMISSION = new SocketPermission("localhost:0", SOCKET_LISTEN_ACTION); - - // java.lang.reflect.AccessibleObject - public static final ReflectPermission ACCESS_PERMISSION = new ReflectPermission("suppressAccessChecks"); - - // 
sun.reflect.ReflectionFactory - public static final RuntimePermission REFLECTION_FACTORY_ACCESS_PERMISSION = new RuntimePermission("reflectionFactoryAccess"); - -} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java deleted file mode 100644 index d182490b8d173..0000000000000 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Java Agent Policy - */ -package org.opensearch.secure_sm.policy; diff --git a/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java b/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java deleted file mode 100644 index 098e4c2605a95..0000000000000 --- a/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.secure_sm.policy; - -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; -import java.io.Reader; -import java.io.StringReader; -import java.util.Enumeration; - -public class PolicyParserTests extends OpenSearchTestCase { - private static final String POLICY = """ - grant codeBase "TestCodeBase" { - permission java.net.NetPermission "accessUnixDomainSocket"; - }; - - grant { - permission java.net.NetPermission "accessUnixDomainSocket"; - permission java.net.SocketPermission "*", "accept,connect"; - }; - """; - - public void testPolicy() throws IOException, PolicyParser.ParsingException { - try (Reader reader = new StringReader(POLICY)) { - final PolicyParser policyParser = new PolicyParser(); - policyParser.read(reader); - - final Enumeration grantEntryEnumeration = policyParser.grantElements(); - final PolicyParser.GrantEntry grantEntry1 = grantEntryEnumeration.nextElement(); - final PolicyParser.GrantEntry grantEntry2 = grantEntryEnumeration.nextElement(); - - assertEquals("TestCodeBase", grantEntry1.codeBase); - assertEquals(1, grantEntry1.permissionEntries.size()); - assertEquals("java.net.NetPermission", grantEntry1.permissionEntries.getFirst().permission); - assertEquals("accessUnixDomainSocket", grantEntry1.permissionEntries.getFirst().name); - - assertNull(grantEntry2.codeBase); - assertEquals(2, grantEntry2.permissionEntries.size()); - assertEquals("java.net.NetPermission", grantEntry2.permissionEntries.getFirst().permission); - assertEquals("accessUnixDomainSocket", grantEntry2.permissionEntries.getFirst().name); - assertEquals("java.net.SocketPermission", grantEntry2.permissionEntries.getLast().permission); - assertEquals("*", grantEntry2.permissionEntries.getLast().name); - assertEquals("accept,connect", grantEntry2.permissionEntries.getLast().action); - } - } -} diff --git a/libs/agent-sm/bootstrap/build.gradle b/libs/agent-sm/bootstrap/build.gradle deleted file mode 100644 index 1757e3cd75c99..0000000000000 --- a/libs/agent-sm/bootstrap/build.gradle +++ /dev/null @@ -1,24 +0,0 @@ -/* - * 
SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -apply plugin: 'opensearch.build' -apply plugin: 'opensearch.publish' - -base { - archivesName = 'opensearch-agent-bootstrap' -} - -tasks.named('forbiddenApisMain').configure { - replaceSignatureFiles 'jdk-signatures' -} - -test.enabled = false -testingConventions.enabled = false diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java deleted file mode 100644 index d2c77fac011b5..0000000000000 --- a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.javaagent.bootstrap; - -import java.lang.StackWalker.Option; -import java.lang.StackWalker.StackFrame; -import java.security.Permission; -import java.security.Policy; -import java.security.ProtectionDomain; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.logging.Logger; -import java.util.stream.Collectors; - -/** - * Agent Policy - */ -@SuppressWarnings("removal") -public class AgentPolicy { - private static final Logger LOGGER = Logger.getLogger(AgentPolicy.class.getName()); - private static volatile Policy policy; - private static volatile Set trustedHosts; - - private AgentPolicy() {} - - /** - * Set Agent policy - * @param policy policy - */ - public static void setPolicy(Policy policy) { - setPolicy(policy, Set.of()); - } - - /** - * Set Agent policy - * @param policy policy - * @param trustedHosts trusted hosts - */ - public static void setPolicy(Policy policy, final Set trustedHosts) { - if (AgentPolicy.policy == null) { - AgentPolicy.policy = policy; - AgentPolicy.trustedHosts = Collections.unmodifiableSet(trustedHosts); - LOGGER.info("Policy attached successfully: " + policy); - } else { - throw new SecurityException("The Policy has been set already: " + AgentPolicy.policy); - } - } - - /** - * Check permissions - * @param permission permission - */ - public static void checkPermission(Permission permission) { - final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); - final List callers = walker.walk( - frames -> frames.map(StackFrame::getDeclaringClass).map(Class::getProtectionDomain).distinct().collect(Collectors.toList()) - ); - - for (final ProtectionDomain domain : callers) { - if (!policy.implies(domain, permission)) { - throw new SecurityException("Denied access: " + permission); - } - } - } - - /** - * Get policy - * @return policy - */ - public static Policy getPolicy() { - return policy; - } - - /** - * Check if hostname is trusted - * @param hostname hostname - * @return is trusted or not - */ - public static boolean isTrustedHost(String hostname) { - return AgentPolicy.trustedHosts.contains(hostname); - } -} diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java deleted file mode 
100644 index 6172ae511a8f7..0000000000000 --- a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Java Agent Policy - */ -package org.opensearch.javaagent.bootstrap; diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java deleted file mode 100644 index 8fb377151ae39..0000000000000 --- a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Java Agent Policy Bootstrap - */ -package org.opensearch; diff --git a/libs/agent-sm/build.gradle b/libs/agent-sm/build.gradle deleted file mode 100644 index 656411a08080f..0000000000000 --- a/libs/agent-sm/build.gradle +++ /dev/null @@ -1,22 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -// This file is intentionally blank. All configuration of the -// distribution is done in the parent project. - -// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory - -base { - archivesName = 'opensearch-agent-sm' -} - -test.enabled = false -testingConventions.enabled = false From f4b839300d5f0067c4e90be2b04ebc09c2af3359 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 28 Mar 2025 11:32:36 -0400 Subject: [PATCH 117/550] Create initial Java Agent to intercept Socket::connect calls (#17724) Signed-off-by: Andriy Redko --- CHANGELOG.md | 3 +- gradle/libs.versions.toml | 6 +- libs/agent-sm/agent/build.gradle | 64 +++++++ .../agent/licenses/byte-buddy-1.17.4.jar.sha1 | 1 + .../agent/licenses/byte-buddy-LICENSE.txt | 180 ++++++++++++++++++ .../agent/licenses/byte-buddy-NOTICE.txt | 13 ++ .../java/org/opensearch/javaagent/Agent.java | 89 +++++++++ .../javaagent/SocketChannelInterceptor.java | 72 +++++++ .../javaagent/StackCallerChainExtractor.java | 39 ++++ .../opensearch/javaagent/package-info.java | 12 ++ .../java/org/opensearch/package-info.java | 12 ++ libs/agent-sm/bootstrap/build.gradle | 24 +++ .../javaagent/bootstrap/AgentPolicy.java | 89 +++++++++ .../javaagent/bootstrap/package-info.java | 12 ++ .../java/org/opensearch/package-info.java | 12 ++ libs/agent-sm/build.gradle | 22 +++ libs/build.gradle | 24 +-- .../licenses/jna-platform-5.13.0.jar.sha1 | 1 - .../licenses/jna-platform-5.16.0.jar.sha1 | 1 + server/licenses/jna-5.13.0.jar.sha1 | 1 - server/licenses/jna-5.16.0.jar.sha1 | 1 + .../licenses/byte-buddy-1.15.10.jar.sha1 | 1 - .../licenses/byte-buddy-1.17.4.jar.sha1 | 1 + .../licenses/mockito-core-5.14.2.jar.sha1 | 1 - .../licenses/mockito-core-5.16.1.jar.sha1 | 1 + 25 files changed, 663 insertions(+), 19 deletions(-) create mode 100644 libs/agent-sm/agent/build.gradle create mode 100644 libs/agent-sm/agent/licenses/byte-buddy-1.17.4.jar.sha1 create mode 100644 
libs/agent-sm/agent/licenses/byte-buddy-LICENSE.txt create mode 100644 libs/agent-sm/agent/licenses/byte-buddy-NOTICE.txt create mode 100644 libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java create mode 100644 libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java create mode 100644 libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java create mode 100644 libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/package-info.java create mode 100644 libs/agent-sm/agent/src/main/java/org/opensearch/package-info.java create mode 100644 libs/agent-sm/bootstrap/build.gradle create mode 100644 libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java create mode 100644 libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java create mode 100644 libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java create mode 100644 libs/agent-sm/build.gradle delete mode 100644 plugins/repository-azure/licenses/jna-platform-5.13.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jna-platform-5.16.0.jar.sha1 delete mode 100644 server/licenses/jna-5.13.0.jar.sha1 create mode 100644 server/licenses/jna-5.16.0.jar.sha1 delete mode 100644 test/framework/licenses/byte-buddy-1.15.10.jar.sha1 create mode 100644 test/framework/licenses/byte-buddy-1.17.4.jar.sha1 delete mode 100644 test/framework/licenses/mockito-core-5.14.2.jar.sha1 create mode 100644 test/framework/licenses/mockito-core-5.16.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index c96ddf75e3282..890d3f2b470d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a new search node role to hold search only shards ([#17620](https://github.com/opensearch-project/OpenSearch/pull/17620)) - Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) -- Added Kinesis support as a plugin for the pull-based ingestion ([#17615](https://github.com/opensearch-project/OpenSearch/pull/17615) +- Added Kinesis support as a plugin for the pull-based ingestion ([#17615](https://github.com/opensearch-project/OpenSearch/pull/17615)) +- [Security Manager Replacement] Create initial Java Agent to intercept Socket::connect calls ([#17724](https://github.com/opensearch-project/OpenSearch/pull/17724)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 4ccb794137c14..e0a34482e2d7b 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -31,7 +31,7 @@ grpc = "1.68.2" json_smart = "2.5.2" # when updating the JNA version, also update the version in buildSrc/build.gradle -jna = "5.13.0" +jna = "5.16.0" netty = "4.1.118.Final" joda = "2.12.7" @@ -70,9 +70,9 @@ password4j = "1.8.2" randomizedrunner = "2.7.1" junit = "4.13.2" hamcrest = "2.1" -mockito = "5.14.2" +mockito = "5.16.1" objenesis = "3.3" -bytebuddy = "1.15.10" +bytebuddy = "1.17.4" # benchmark dependencies jmh = "1.35" diff --git a/libs/agent-sm/agent/build.gradle b/libs/agent-sm/agent/build.gradle new file mode 100644 index 0000000000000..266750f8d9529 --- /dev/null +++ 
b/libs/agent-sm/agent/build.gradle @@ -0,0 +1,64 @@ +apply plugin: 'opensearch.build' +apply plugin: 'opensearch.publish' + +base { + archivesName = 'opensearch-agent' +} + +configurations { + bootstrap.extendsFrom(implementation) +} + +dependencies { + implementation project(":libs:agent-sm:bootstrap") + implementation "net.bytebuddy:byte-buddy:${versions.bytebuddy}" + compileOnly "com.google.code.findbugs:jsr305:3.0.2" +} + +var bootClasspath = configurations.bootstrap.incoming.artifactView { }.files + .getFiles() + .collect { it.name } + +jar { + manifest { + attributes( + "Can-Redefine-Classes": "true", + "Can-Retransform-Classes": "true", + "Agent-Class": "org.opensearch.javaagent.Agent", + "Premain-Class": "org.opensearch.javaagent.Agent", + "Boot-Class-Path": bootClasspath.join(' ') + ) + } +} + +compileJava { + options.compilerArgs -= '-Werror' +} + +test.enabled = false +testingConventions.enabled = false + +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' +} + +task prepareAgent(type: Copy) { + from(configurations.runtimeClasspath) + into "$buildDir/distributions" + dependsOn jar +} + +thirdPartyAudit { + ignoreMissingClasses( + 'com.sun.jna.FunctionMapper', + 'com.sun.jna.JNIEnv', + 'com.sun.jna.Library', + 'com.sun.jna.Native', + 'com.sun.jna.NativeLibrary', + 'com.sun.jna.Platform' + ) +} + +tasks.named('validateNebulaPom') { + dependsOn prepareAgent +} diff --git a/libs/agent-sm/agent/licenses/byte-buddy-1.17.4.jar.sha1 b/libs/agent-sm/agent/licenses/byte-buddy-1.17.4.jar.sha1 new file mode 100644 index 0000000000000..ebf5d77477f30 --- /dev/null +++ b/libs/agent-sm/agent/licenses/byte-buddy-1.17.4.jar.sha1 @@ -0,0 +1 @@ +ffb8488d93290eff074fb542a596e4c5a26d0315 \ No newline at end of file diff --git a/libs/agent-sm/agent/licenses/byte-buddy-LICENSE.txt b/libs/agent-sm/agent/licenses/byte-buddy-LICENSE.txt new file mode 100644 index 0000000000000..719c6605bb9b4 --- /dev/null +++ b/libs/agent-sm/agent/licenses/byte-buddy-LICENSE.txt @@ -0,0 +1,180 @@ +This product bundles ASM 9.7.1, which is available under a "3-clause BSD" +license. For details, see licenses/ASM. For more information visit ${asm.url}. + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + diff --git a/libs/agent-sm/agent/licenses/byte-buddy-NOTICE.txt b/libs/agent-sm/agent/licenses/byte-buddy-NOTICE.txt new file mode 100644 index 0000000000000..929cbc6b18bba --- /dev/null +++ b/libs/agent-sm/agent/licenses/byte-buddy-NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 - Present Rafael Winterhalter + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java new file mode 100644 index 0000000000000..6c5931f537956 --- /dev/null +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.javaagent; + +import org.opensearch.javaagent.bootstrap.AgentPolicy; + +import java.lang.instrument.Instrumentation; +import java.nio.channels.SocketChannel; +import java.util.Map; + +import net.bytebuddy.ByteBuddy; +import net.bytebuddy.agent.builder.AgentBuilder; +import net.bytebuddy.asm.Advice; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.dynamic.ClassFileLocator; +import net.bytebuddy.dynamic.loading.ClassInjector; +import net.bytebuddy.implementation.Implementation; +import net.bytebuddy.matcher.ElementMatcher.Junction; +import net.bytebuddy.matcher.ElementMatchers; + +/** + * Java Agent + */ +public class Agent { + /** + * Constructor + */ + private Agent() {} + + /** + * Premain + * @param agentArguments agent arguments + * @param instrumentation instrumentation + * @throws Exception Exception + */ + public static void premain(String agentArguments, Instrumentation instrumentation) throws Exception { + initAgent(instrumentation); + } + + /** + * Agent Main + * @param agentArguments agent arguments + * @param instrumentation instrumentation + * @throws Exception Exception + */ + public static void agentmain(String agentArguments, Instrumentation instrumentation) throws Exception { + initAgent(instrumentation); + } + + private static AgentBuilder createAgentBuilder(Instrumentation inst) throws Exception { + final Junction systemType = ElementMatchers.isSubTypeOf(SocketChannel.class); + + final AgentBuilder.Transformer transformer = (b, typeDescription, classLoader, module, pd) -> b.visit( + Advice.to(SocketChannelInterceptor.class) + .on(ElementMatchers.named("connect").and(ElementMatchers.not(ElementMatchers.isAbstract()))) + ); + + ClassInjector.UsingUnsafe.ofBootLoader() + .inject( + Map.of( + new TypeDescription.ForLoadedType(StackCallerChainExtractor.class), + ClassFileLocator.ForClassLoader.read(StackCallerChainExtractor.class), + new TypeDescription.ForLoadedType(AgentPolicy.class), + ClassFileLocator.ForClassLoader.read(AgentPolicy.class) + ) + ); + + final ByteBuddy byteBuddy = new ByteBuddy().with(Implementation.Context.Disabled.Factory.INSTANCE); + final AgentBuilder agentBuilder = new AgentBuilder.Default(byteBuddy).with(AgentBuilder.InitializationStrategy.NoOp.INSTANCE) + .with(AgentBuilder.RedefinitionStrategy.REDEFINITION) + .with(AgentBuilder.TypeStrategy.Default.REDEFINE) + .ignore(ElementMatchers.none()) + .type(systemType) + .transform(transformer); + + return agentBuilder; + } + + private static void initAgent(Instrumentation instrumentation) throws Exception { + AgentBuilder agentBuilder = createAgentBuilder(instrumentation); + agentBuilder.installOn(instrumentation); + } +} diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java new file mode 100644 index 0000000000000..b3e0251c6f1b1 --- /dev/null +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.javaagent; + +import org.opensearch.javaagent.bootstrap.AgentPolicy; + +import java.lang.StackWalker.Option; +import java.lang.reflect.Method; +import java.net.InetSocketAddress; +import java.net.NetPermission; +import java.net.SocketPermission; +import java.net.UnixDomainSocketAddress; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.util.List; + +import net.bytebuddy.asm.Advice; +import net.bytebuddy.asm.Advice.Origin; + +/** + * SocketChannelInterceptor + */ +public class SocketChannelInterceptor { + /** + * SocketChannelInterceptor + */ + public SocketChannelInterceptor() {} + + /** + * Interceptors + * @param args arguments + * @param method method + * @throws Exception exceptions + */ + @Advice.OnMethodEnter + @SuppressWarnings("removal") + public static void intercept(@Advice.AllArguments Object[] args, @Origin Method method) throws Exception { + final Policy policy = AgentPolicy.getPolicy(); + if (policy == null) { + return; /* noop */ + } + + final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); + final List callers = walker.walk(new StackCallerChainExtractor()); + + if (args[0] instanceof InetSocketAddress address) { + if (!AgentPolicy.isTrustedHost(address.getHostString())) { + final String host = address.getHostString() + ":" + address.getPort(); + + final SocketPermission permission = new SocketPermission(host, "connect,resolve"); + for (final ProtectionDomain domain : callers) { + if (!policy.implies(domain, permission)) { + throw new SecurityException("Denied access to: " + host + ", domain " + domain); + } + } + } + } else if (args[0] instanceof UnixDomainSocketAddress address) { + final NetPermission permission = new NetPermission("accessUnixDomainSocket"); + for (final ProtectionDomain domain : callers) { + if (!policy.implies(domain, permission)) { + throw new SecurityException("Denied access to: " + address + ", domain " + domain); + } + } + } + } +} diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java new file mode 100644 index 0000000000000..6c33ca73e107d --- /dev/null +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.javaagent; + +import java.lang.StackWalker.StackFrame; +import java.security.ProtectionDomain; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Stack Caller Chain Extractor + */ +public final class StackCallerChainExtractor implements Function, List> { + /** + * Constructor + */ + public StackCallerChainExtractor() {} + + /** + * Folds the stack + * @param frames stack frames + */ + @Override + public List apply(Stream frames) { + return frames.map(StackFrame::getDeclaringClass) + .map(Class::getProtectionDomain) + .filter(pd -> pd.getCodeSource() != null) /* JDK */ + .distinct() + .collect(Collectors.toList()); + } +} diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/package-info.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/package-info.java new file mode 100644 index 0000000000000..447a0d8828875 --- /dev/null +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Java Agent + */ +package org.opensearch.javaagent; diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/package-info.java b/libs/agent-sm/agent/src/main/java/org/opensearch/package-info.java new file mode 100644 index 0000000000000..a15b5b51ae3de --- /dev/null +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Java Agent + */ +package org.opensearch; diff --git a/libs/agent-sm/bootstrap/build.gradle b/libs/agent-sm/bootstrap/build.gradle new file mode 100644 index 0000000000000..1757e3cd75c99 --- /dev/null +++ b/libs/agent-sm/bootstrap/build.gradle @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +apply plugin: 'opensearch.build' +apply plugin: 'opensearch.publish' + +base { + archivesName = 'opensearch-agent-bootstrap' +} + +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' +} + +test.enabled = false +testingConventions.enabled = false diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java new file mode 100644 index 0000000000000..d2c77fac011b5 --- /dev/null +++ b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.javaagent.bootstrap; + +import java.lang.StackWalker.Option; +import java.lang.StackWalker.StackFrame; +import java.security.Permission; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.logging.Logger; +import java.util.stream.Collectors; + +/** + * Agent Policy + */ +@SuppressWarnings("removal") +public class AgentPolicy { + private static final Logger LOGGER = Logger.getLogger(AgentPolicy.class.getName()); + private static volatile Policy policy; + private static volatile Set trustedHosts; + + private AgentPolicy() {} + + /** + * Set Agent policy + * @param policy policy + */ + public static void setPolicy(Policy policy) { + setPolicy(policy, Set.of()); + } + + /** + * Set Agent policy + * @param policy policy + * @param trustedHosts trusted hosts + */ + public static void setPolicy(Policy policy, final Set trustedHosts) { + if (AgentPolicy.policy == null) { + AgentPolicy.policy = policy; + AgentPolicy.trustedHosts = Collections.unmodifiableSet(trustedHosts); + LOGGER.info("Policy attached successfully: " + policy); + } else { + throw new SecurityException("The Policy has been set already: " + AgentPolicy.policy); + } + } + + /** + * Check permissions + * @param permission permission + */ + public static void checkPermission(Permission permission) { + final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); + final List callers = walker.walk( + frames -> frames.map(StackFrame::getDeclaringClass).map(Class::getProtectionDomain).distinct().collect(Collectors.toList()) + ); + + for (final ProtectionDomain domain : callers) { + if (!policy.implies(domain, permission)) { + throw new SecurityException("Denied access: " + permission); + } + } + } + + /** + * Get policy + * @return policy + */ + public static Policy getPolicy() { + return policy; + } + + /** + * Check if hostname is trusted + * @param hostname hostname + * @return is trusted or not + */ + public static boolean isTrustedHost(String hostname) { + return AgentPolicy.trustedHosts.contains(hostname); + } +} diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java new file mode 100644 index 0000000000000..6172ae511a8f7 --- /dev/null +++ b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Java Agent Policy + */ +package org.opensearch.javaagent.bootstrap; diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java new file mode 100644 index 0000000000000..8fb377151ae39 --- /dev/null +++ b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Java Agent Policy Bootstrap + */ +package org.opensearch; diff --git a/libs/agent-sm/build.gradle b/libs/agent-sm/build.gradle new file mode 100644 index 0000000000000..656411a08080f --- /dev/null +++ b/libs/agent-sm/build.gradle @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory + +base { + archivesName = 'opensearch-agent-sm' +} + +test.enabled = false +testingConventions.enabled = false diff --git a/libs/build.gradle b/libs/build.gradle index c0fcc1ff2b977..9bf359d936178 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -40,17 +40,19 @@ subprojects { * other libs. This keeps our dependencies simpler. */ project.afterEvaluate { - configurations.all { Configuration conf -> - dependencies.matching { it instanceof ProjectDependency }.all { ProjectDependency dep -> - Project depProject = project.project(dep.path) - if (depProject != null - && (false == depProject.path.equals(':libs:opensearch-core') && - false == depProject.path.equals(':libs:opensearch-common')) - && depProject.path.startsWith(':libs')) { - throw new InvalidUserDataException("projects in :libs " - + "may not depend on other projects libs except " - + ":libs:opensearch-core or :libs:opensearch-common but " - + "${project.path} depends on ${depProject.path}") + if (!project.path.equals(':libs:agent-sm:agent')) { + configurations.all { Configuration conf -> + dependencies.matching { it instanceof ProjectDependency }.all { ProjectDependency dep -> + Project depProject = project.project(dep.path) + if (depProject != null + && (false == depProject.path.equals(':libs:opensearch-core') && + false == depProject.path.equals(':libs:opensearch-common')) + && depProject.path.startsWith(':libs')) { + throw new InvalidUserDataException("projects in :libs " + + "may not depend on other projects libs except " + + ":libs:opensearch-core or :libs:opensearch-common but " + + "${project.path} depends on ${depProject.path}") + } } } } diff --git a/plugins/repository-azure/licenses/jna-platform-5.13.0.jar.sha1 b/plugins/repository-azure/licenses/jna-platform-5.13.0.jar.sha1 deleted file mode 100644 index e2a8ba1c1bbd3..0000000000000 --- a/plugins/repository-azure/licenses/jna-platform-5.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -88e9a306715e9379f3122415ef4ae759a352640d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jna-platform-5.16.0.jar.sha1 b/plugins/repository-azure/licenses/jna-platform-5.16.0.jar.sha1 new file mode 100644 index 0000000000000..84b5ec150d7a5 --- /dev/null +++ b/plugins/repository-azure/licenses/jna-platform-5.16.0.jar.sha1 @@ -0,0 +1 @@ +b2a9065f97c166893d504b164706512338e3bbc2 \ No newline at end of file diff --git a/server/licenses/jna-5.13.0.jar.sha1 b/server/licenses/jna-5.13.0.jar.sha1 deleted file mode 100644 index faf2012f0b5c0..0000000000000 --- a/server/licenses/jna-5.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1200e7ebeedbe0d10062093f32925a912020e747 \ No newline at end of file diff --git a/server/licenses/jna-5.16.0.jar.sha1 b/server/licenses/jna-5.16.0.jar.sha1 new file 
mode 100644 index 0000000000000..4b45642a01952 --- /dev/null +++ b/server/licenses/jna-5.16.0.jar.sha1 @@ -0,0 +1 @@ +ebea09f91dc9f7048099f963fb8d6f919f0a4d9c \ No newline at end of file diff --git a/test/framework/licenses/byte-buddy-1.15.10.jar.sha1 b/test/framework/licenses/byte-buddy-1.15.10.jar.sha1 deleted file mode 100644 index b89163a2aa842..0000000000000 --- a/test/framework/licenses/byte-buddy-1.15.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -635c873fadd853c084f84fdc3cbd58c5dd8537f9 \ No newline at end of file diff --git a/test/framework/licenses/byte-buddy-1.17.4.jar.sha1 b/test/framework/licenses/byte-buddy-1.17.4.jar.sha1 new file mode 100644 index 0000000000000..ebf5d77477f30 --- /dev/null +++ b/test/framework/licenses/byte-buddy-1.17.4.jar.sha1 @@ -0,0 +1 @@ +ffb8488d93290eff074fb542a596e4c5a26d0315 \ No newline at end of file diff --git a/test/framework/licenses/mockito-core-5.14.2.jar.sha1 b/test/framework/licenses/mockito-core-5.14.2.jar.sha1 deleted file mode 100644 index a9fe959400ceb..0000000000000 --- a/test/framework/licenses/mockito-core-5.14.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f7bf936008d7664e2002c3faf0c02071c8d10e7c \ No newline at end of file diff --git a/test/framework/licenses/mockito-core-5.16.1.jar.sha1 b/test/framework/licenses/mockito-core-5.16.1.jar.sha1 new file mode 100644 index 0000000000000..7ced27fb416bf --- /dev/null +++ b/test/framework/licenses/mockito-core-5.16.1.jar.sha1 @@ -0,0 +1 @@ +be8bf71080016b793f32456faff861e2c3a77f62 \ No newline at end of file From edd854a61b6fc051dfab2a3ce4641748ff799491 Mon Sep 17 00:00:00 2001 From: Yupeng Fu Date: Fri, 28 Mar 2025 11:20:18 -0700 Subject: [PATCH 118/550] Fix flaky test in IngestFromKinesisIT (#17697) --------- Signed-off-by: Yupeng Fu --- .../plugin/kinesis/IngestFromKinesisIT.java | 50 +++++++++++++++++-- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java b/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java index 44b9be94eec17..ea668ff8bfeec 100644 --- a/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java +++ b/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java @@ -8,6 +8,15 @@ package org.opensearch.plugin.kinesis; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse; +import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; +import software.amazon.awssdk.services.kinesis.model.Record; +import software.amazon.awssdk.services.kinesis.model.ShardIteratorType; + import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -94,8 +103,12 @@ public void testKinesisIngestion() { public void testKinesisIngestion_RewindByOffset() throws InterruptedException { produceData("1", "name1", "24"); - String sequenceNumber = produceData("2", "name2", "20"); - Thread.sleep(1000); + produceData("2", "name2", "24"); + String sequenceNumber 
= produceData("3", "name3", "20"); + logger.info("Produced message with sequence number: {}", sequenceNumber); + produceData("4", "name4", "21"); + + await().atMost(5, TimeUnit.SECONDS).until(() -> isRewinded(sequenceNumber)); // create an index with ingestion source from kinesis createIndex( @@ -122,7 +135,38 @@ public void testKinesisIngestion_RewindByOffset() throws InterruptedException { await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { refresh("test_rewind_by_offset"); SearchResponse response = client().prepareSearch("test_rewind_by_offset").setQuery(query).get(); - assertThat(response.getHits().getTotalHits().value(), is(1L)); + assertThat(response.getHits().getTotalHits().value(), is(2L)); }); } + + private boolean isRewinded(String sequenceNumber) { + DescribeStreamResponse describeStreamResponse = kinesisClient.describeStream( + DescribeStreamRequest.builder().streamName(streamName).build() + ); + + String shardId = describeStreamResponse.streamDescription().shards().get(0).shardId(); + + GetShardIteratorRequest iteratorRequest = GetShardIteratorRequest.builder() + .streamName(streamName) + .shardId(shardId) + .shardIteratorType(ShardIteratorType.AT_SEQUENCE_NUMBER) + .startingSequenceNumber(sequenceNumber) + .build(); + + GetShardIteratorResponse iteratorResponse = kinesisClient.getShardIterator(iteratorRequest); + String shardIterator = iteratorResponse.shardIterator(); + + // Use the iterator to read the record + GetRecordsRequest recordsRequest = GetRecordsRequest.builder() + .shardIterator(shardIterator) + .limit(1) // Adjust as needed + .build(); + + GetRecordsResponse recordsResponse = kinesisClient.getRecords(recordsRequest); + List records = recordsResponse.records(); + if (records.size() != 1) { + return false; + } + return records.get(0).partitionKey().equals("3"); + } } From e0a67fd9ca949b14b90dc206231d90158bc35b38 Mon Sep 17 00:00:00 2001 From: mulugetam Date: Sat, 29 Mar 2025 06:50:04 -0700 Subject: [PATCH 119/550] Bump ZSTD lib to version 1.5.6-1. (#17674) * Bump ZSTD lib to version 1.5.6-1. Signed-off-by: Mulugeta Mammo * Update CHANGELOG.md for ZSTD version bump. Signed-off-by: Mulugeta Mammo * Update zstd-jni SHA. 
Signed-off-by: Mulugeta Mammo --------- Signed-off-by: Mulugeta Mammo Signed-off-by: mulugetam --- CHANGELOG.md | 1 + gradle/libs.versions.toml | 2 +- libs/compress/licenses/zstd-jni-1.5.5-5.jar.sha1 | 1 - libs/compress/licenses/zstd-jni-1.5.6-1.jar.sha1 | 1 + server/licenses/zstd-jni-1.5.5-5.jar.sha1 | 1 - server/licenses/zstd-jni-1.5.6-1.jar.sha1 | 1 + server/src/main/java/org/opensearch/bootstrap/Security.java | 2 +- .../src/test/java/org/opensearch/bootstrap/SecurityTests.java | 4 ++-- 8 files changed, 7 insertions(+), 6 deletions(-) delete mode 100644 libs/compress/licenses/zstd-jni-1.5.5-5.jar.sha1 create mode 100644 libs/compress/licenses/zstd-jni-1.5.6-1.jar.sha1 delete mode 100644 server/licenses/zstd-jni-1.5.5-5.jar.sha1 create mode 100644 server/licenses/zstd-jni-1.5.6-1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 890d3f2b470d4..0bb3eed67949f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.azure:azure-storage-blob` from 12.29.1 to 12.30.0 ([#17667](https://github.com/opensearch-project/OpenSearch/pull/17667)) - Bump `tj-actions/changed-files` from 46.0.1 to 46.0.3 ([#17666](https://github.com/opensearch-project/OpenSearch/pull/17666)) - Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17668](https://github.com/opensearch-project/OpenSearch/pull/17668)) +- Bump `com.github.luben:zstd-jni` from 1.5.5-1 to 1.5.6-1 ([#17674](https://github.com/opensearch-project/OpenSearch/pull/17674)) ### Changed diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index e0a34482e2d7b..cf4b66e2a2986 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -78,7 +78,7 @@ bytebuddy = "1.17.4" jmh = "1.35" # compression -zstd = "1.5.5-5" +zstd = "1.5.6-1" jzlib = "1.1.3" diff --git a/libs/compress/licenses/zstd-jni-1.5.5-5.jar.sha1 b/libs/compress/licenses/zstd-jni-1.5.5-5.jar.sha1 deleted file mode 100644 index 498c60c34e3da..0000000000000 --- a/libs/compress/licenses/zstd-jni-1.5.5-5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74ffdc5f140080adacf5278287aadd950179f848 \ No newline at end of file diff --git a/libs/compress/licenses/zstd-jni-1.5.6-1.jar.sha1 b/libs/compress/licenses/zstd-jni-1.5.6-1.jar.sha1 new file mode 100644 index 0000000000000..0b0e63e9e598d --- /dev/null +++ b/libs/compress/licenses/zstd-jni-1.5.6-1.jar.sha1 @@ -0,0 +1 @@ +d6c1fbcf83d1d125fb8dbc7e3c5208729057d9a7 diff --git a/server/licenses/zstd-jni-1.5.5-5.jar.sha1 b/server/licenses/zstd-jni-1.5.5-5.jar.sha1 deleted file mode 100644 index 498c60c34e3da..0000000000000 --- a/server/licenses/zstd-jni-1.5.5-5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74ffdc5f140080adacf5278287aadd950179f848 \ No newline at end of file diff --git a/server/licenses/zstd-jni-1.5.6-1.jar.sha1 b/server/licenses/zstd-jni-1.5.6-1.jar.sha1 new file mode 100644 index 0000000000000..21ecbb2972e33 --- /dev/null +++ b/server/licenses/zstd-jni-1.5.6-1.jar.sha1 @@ -0,0 +1 @@ +d6c1fbcf83d1d125fb8dbc7e3c5208729057d9a7 \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index acf2d7ec6a5ac..9c93b0414bdda 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -256,7 +256,7 @@ static Policy readPolicy(URL policyFile, Map codebases) { // - netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar // - 
kafka-server-common-3.6.1-test.jar // - lucene-core-9.11.0-snapshot-8a555eb.jar - // - zstd-jni-1.5.5-5.jar + // - zstd-jni-1.5.6-1.jar jarsWithPossibleClassifiers.put(codebase, matcher.group(2)); } else { String property = "codebase." + name; diff --git a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java index ae57a1b0916cb..9bd5e46fe50a5 100644 --- a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java @@ -96,8 +96,8 @@ public void testReadPolicyWithCodebases() throws IOException { new URL("file://test-kafka-server-common-3.6.1-test.jar"), "test-lucene-core-9.11.0-snapshot-8a555eb.jar", new URL("file://test-lucene-core-9.11.0-snapshot-8a555eb.jar"), - "test-zstd-jni-1.5.5-5.jar", - new URL("file://test-zstd-jni-1.5.5-5.jar") + "test-zstd-jni-1.5.6-1.jar", + new URL("file://test-zstd-jni-1.5.6-1.jar") ); AccessController.doPrivileged( From fc78200cd33121c2a7f52250b19816870495dc26 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Mar 2025 13:13:34 -0500 Subject: [PATCH 120/550] Bump org.jruby.joni:joni from 2.2.5 to 2.2.6 in /libs/grok (#17732) * Bump org.jruby.joni:joni from 2.2.5 to 2.2.6 in /libs/grok Bumps [org.jruby.joni:joni](https://github.com/jruby/joni) from 2.2.5 to 2.2.6. - [Commits](https://github.com/jruby/joni/compare/joni-2.2.5...joni-2.2.6) --- updated-dependencies: - dependency-name: org.jruby.joni:joni dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 2 +- libs/grok/build.gradle | 2 +- libs/grok/licenses/joni-2.2.5.jar.sha1 | 1 - libs/grok/licenses/joni-2.2.6.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 libs/grok/licenses/joni-2.2.5.jar.sha1 create mode 100644 libs/grok/licenses/joni-2.2.6.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 0bb3eed67949f..f52eb1b77ee98 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,7 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607), [#17669](https://github.com/opensearch-project/OpenSearch/pull/17669)) - Bump `com.google.api:api-common` from 1.8.1 to 2.46.1 ([#17604](https://github.com/opensearch-project/OpenSearch/pull/17604)) - Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) -- Bump `org.jruby.joni:joni` from 2.2.3 to 2.2.5 ([#17608](https://github.com/opensearch-project/OpenSearch/pull/17608)) +- Bump `org.jruby.joni:joni` from 2.2.3 to 2.2.6 ([#17608](https://github.com/opensearch-project/OpenSearch/pull/17608), [#17732](https://github.com/opensearch-project/OpenSearch/pull/17732)) - Bump `dangoslen/dependabot-changelog-helper` from 3 to 4 ([#17498](https://github.com/opensearch-project/OpenSearch/pull/17498)) - Bump `com.google.api:gax` from 2.35.0 to 2.63.1 ([#17465](https://github.com/opensearch-project/OpenSearch/pull/17465)) - Bump `com.azure:azure-storage-blob` from 12.29.1 to 12.30.0 
([#17667](https://github.com/opensearch-project/OpenSearch/pull/17667)) diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index 97c68177a1164..a23f518ee9c5a 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -29,7 +29,7 @@ */ dependencies { - api 'org.jruby.joni:joni:2.2.5' + api 'org.jruby.joni:joni:2.2.6' // joni dependencies: api 'org.jruby.jcodings:jcodings:1.0.63' diff --git a/libs/grok/licenses/joni-2.2.5.jar.sha1 b/libs/grok/licenses/joni-2.2.5.jar.sha1 deleted file mode 100644 index 060581a9b8a28..0000000000000 --- a/libs/grok/licenses/joni-2.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ebafe67efa7395678a34d07e7585bed5ef0cc72 \ No newline at end of file diff --git a/libs/grok/licenses/joni-2.2.6.jar.sha1 b/libs/grok/licenses/joni-2.2.6.jar.sha1 new file mode 100644 index 0000000000000..c097a8c30bb51 --- /dev/null +++ b/libs/grok/licenses/joni-2.2.6.jar.sha1 @@ -0,0 +1 @@ +864217ab7dcece78ad7bf3c045a602d871ff309a \ No newline at end of file From 6a24a5576904999f3ec07a840438129a030a8aef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Mar 2025 13:14:25 -0500 Subject: [PATCH 121/550] Bump lycheeverse/lychee-action from 2.3.0 to 2.4.0 (#17731) * Bump lycheeverse/lychee-action from 2.3.0 to 2.4.0 Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 2.3.0 to 2.4.0. - [Release notes](https://github.com/lycheeverse/lychee-action/releases) - [Commits](https://github.com/lycheeverse/lychee-action/compare/v2.3.0...v2.4.0) --- updated-dependencies: - dependency-name: lycheeverse/lychee-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/links.yml | 2 +- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 53a7f2370e5ad..3b6e13f89e10a 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v4 - name: lychee Link Checker id: lychee - uses: lycheeverse/lychee-action@v2.3.0 + uses: lycheeverse/lychee-action@v2.4.0 with: args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes fail: true diff --git a/CHANGELOG.md b/CHANGELOG.md index f52eb1b77ee98..47c2ef9dd5033 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `tj-actions/changed-files` from 46.0.1 to 46.0.3 ([#17666](https://github.com/opensearch-project/OpenSearch/pull/17666)) - Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17668](https://github.com/opensearch-project/OpenSearch/pull/17668)) - Bump `com.github.luben:zstd-jni` from 1.5.5-1 to 1.5.6-1 ([#17674](https://github.com/opensearch-project/OpenSearch/pull/17674)) +- Bump `lycheeverse/lychee-action` from 2.3.0 to 2.4.0 ([#17731](https://github.com/opensearch-project/OpenSearch/pull/17731)) ### Changed From 10fb8527e64dabbdc0a50ba0aae10ff30faecb8f Mon Sep 17 00:00:00 2001 From: Varun Bharadwaj Date: Mon, 31 Mar 2025 11:57:59 -0700 Subject: [PATCH 122/550] Add ingestion management APIs for pause, resume and get state (#17631) Signed-off-by: Varun Bharadwaj --- CHANGELOG.md | 3 +- 
.../missingdoclet/MissingDoclet.java | 3 +- .../plugin/kafka/IngestFromKafkaIT.java | 2 +- .../plugin/kafka/KafkaIngestionBaseIT.java | 49 +++- .../plugin/kafka/RemoteStoreKafkaIT.java | 156 ++++++++++++ .../kafka/TestContainerThreadLeakFilter.java | 6 +- .../plugin/kinesis/IngestFromKinesisIT.java | 2 +- .../org/opensearch/action/ActionModule.java | 22 ++ .../IngestionStateShardFailure.java | 67 ++++++ .../IngestionUpdateStateResponse.java | 106 +++++++++ .../streamingingestion/package-info.java | 10 + .../pause/PauseIngestionAction.java | 26 ++ .../pause/PauseIngestionRequest.java | 100 ++++++++ .../pause/PauseIngestionResponse.java | 38 +++ .../pause/TransportPauseIngestionAction.java | 147 ++++++++++++ .../pause/package-info.java | 10 + .../resume/ResumeIngestionAction.java | 26 ++ .../resume/ResumeIngestionRequest.java | 172 +++++++++++++ .../resume/ResumeIngestionResponse.java | 38 +++ .../TransportResumeIngestionAction.java | 147 ++++++++++++ .../resume/package-info.java | 10 + .../state/GetIngestionStateAction.java | 26 ++ .../state/GetIngestionStateRequest.java | 135 +++++++++++ .../state/GetIngestionStateResponse.java | 104 ++++++++ .../state/ShardIngestionState.java | 96 ++++++++ .../TransportGetIngestionStateAction.java | 225 ++++++++++++++++++ .../TransportUpdateIngestionStateAction.java | 144 +++++++++++ .../state/UpdateIngestionStateAction.java | 26 ++ .../state/UpdateIngestionStateRequest.java | 87 +++++++ .../state/UpdateIngestionStateResponse.java | 78 ++++++ .../state/package-info.java | 10 + .../pagination/ShardPaginationStrategy.java | 22 +- .../cluster/metadata/IndexMetadata.java | 52 +++- .../cluster/metadata/IngestionStatus.java | 36 +++ ...etadataStreamingIngestionStateService.java | 170 +++++++++++++ .../index/engine/IngestionEngine.java | 34 ++- .../shard/AbstractIndexShardComponent.java | 5 + .../opensearch/index/shard/IndexShard.java | 47 ++++ .../cluster/IndicesClusterStateService.java | 18 ++ .../BlockIngestionErrorStrategy.java | 6 + .../pollingingest/DefaultStreamPoller.java | 19 +- .../DropIngestionErrorStrategy.java | 5 + .../pollingingest/IngestionErrorStrategy.java | 5 + .../indices/pollingingest/StreamPoller.java | 2 + .../org/opensearch/rest/BaseRestHandler.java | 21 ++ .../indices/RestGetIngestionStateAction.java | 72 ++++++ .../indices/RestPauseIngestionAction.java | 61 +++++ .../indices/RestResumeIngestionAction.java | 59 +++++ .../transport/client/IndicesAdminClient.java | 24 ++ .../opensearch/transport/client/Requests.java | 35 +++ .../client/support/AbstractClient.java | 45 ++++ .../IngestionUpdateStateResponseTests.java | 57 +++++ .../pause/PauseIngestionRequestTests.java | 47 ++++ .../resume/ResumeIngestionRequestTests.java | 62 +++++ .../state/GetIngestionStateRequestTests.java | 47 ++++ .../state/GetIngestionStateResponseTests.java | 39 +++ .../state/ShardIngestionStateTests.java | 74 ++++++ ...TransportGetIngestionStateActionTests.java | 182 ++++++++++++++ ...nsportUpdateIngestionStateActionTests.java | 175 ++++++++++++++ .../UpdateIngestionStateRequestTests.java | 58 +++++ .../UpdateIngestionStateResponseTests.java | 46 ++++ .../DefaultStreamPollerTests.java | 24 +- 62 files changed, 3591 insertions(+), 29 deletions(-) create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/IngestionStateShardFailure.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/IngestionUpdateStateResponse.java create mode 100644 
server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/TransportPauseIngestionAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/TransportResumeIngestionAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/ShardIngestionState.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportGetIngestionStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportUpdateIngestionStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/IngestionStatus.java create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/MetadataStreamingIngestionStateService.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIngestionStateAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/indices/RestPauseIngestionAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/indices/RestResumeIngestionAction.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/IngestionUpdateStateResponseTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionRequestTests.java create mode 100644 
server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionRequestTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateRequestTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateResponseTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/ShardIngestionStateTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportGetIngestionStateActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportUpdateIngestionStateActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateRequestTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateResponseTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 47c2ef9dd5033..f7d0cd93d7005 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,10 +9,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Rule Based Auto-tagging] Add rule schema for auto tagging ([#17238](https://github.com/opensearch-project/OpenSearch/pull/17238)) - Renaming the node role search to warm ([#17573](https://github.com/opensearch-project/OpenSearch/pull/17573)) - Introduce a new search node role to hold search only shards ([#17620](https://github.com/opensearch-project/OpenSearch/pull/17620)) -- Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) +- Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) - Added Kinesis support as a plugin for the pull-based ingestion ([#17615](https://github.com/opensearch-project/OpenSearch/pull/17615)) - [Security Manager Replacement] Create initial Java Agent to intercept Socket::connect calls ([#17724](https://github.com/opensearch-project/OpenSearch/pull/17724)) +- Add ingestion management APIs for pause, resume and get ingestion state ([#17631](https://github.com/opensearch-project/OpenSearch/pull/17631)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java index 470ab75b7930c..5536e91a51dd5 100644 --- a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java +++ b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java @@ -235,6 +235,7 @@ private void check(Element element) { case INTERFACE: case ENUM: case ANNOTATION_TYPE: + case RECORD: if (level(element) >= CLASS) { checkComment(element); for (var subElement : element.getEnclosedElements()) { @@ -343,7 +344,7 @@ private boolean isGenerated(Element element) { if (!isGenerated && element.getEnclosingElement() != null) { // check if enclosing element is generated return isGenerated(element.getEnclosingElement()); - } + } return 
isGenerated; } diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java index f890d913ad8cf..86d8710f4daab 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java @@ -32,7 +32,7 @@ import static org.awaitility.Awaitility.await; /** - * Integration test for Kafka ingestion + * Integration test for Kafka ingestion. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class IngestFromKafkaIT extends KafkaIngestionBaseIT { diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java index cfc9b4a766fcd..a9ae195332117 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java @@ -14,11 +14,16 @@ import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.serialization.StringSerializer; +import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionResponse; +import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionResponse; +import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateResponse; +import org.opensearch.action.pagination.PageParams; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.transport.client.Requests; import org.junit.After; import org.junit.Before; @@ -28,6 +33,7 @@ import java.util.Locale; import java.util.Properties; import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import org.testcontainers.containers.KafkaContainer; @@ -45,6 +51,7 @@ public class KafkaIngestionBaseIT extends OpenSearchIntegTestCase { protected KafkaContainer kafka; protected Producer producer; + protected int numKafkaPartitions = 1; @Override protected Collection> nodePlugins() { @@ -53,7 +60,7 @@ protected Collection> nodePlugins() { @Before private void setup() { - setupKafka(); + setupKafka(numKafkaPartitions); } @After @@ -61,7 +68,7 @@ private void cleanup() { stopKafka(); } - private void setupKafka() { + private void setupKafka(int numKafkaPartitions) { kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.2.1")) // disable topic auto creation .withEnv("KAFKA_AUTO_CREATE_TOPICS_ENABLE", "false"); @@ -69,7 +76,7 @@ private void setupKafka() { // setup producer String boostrapServers = kafka.getBootstrapServers(); - KafkaUtils.createTopic(topicName, 1, boostrapServers); + KafkaUtils.createTopic(topicName, numKafkaPartitions, boostrapServers); Properties props = new Properties(); props.put("bootstrap.servers", kafka.getBootstrapServers()); producer = new KafkaProducer<>(props, new StringSerializer(), new 
StringSerializer()); @@ -112,6 +119,11 @@ protected void waitForSearchableDocs(long docCount, List nodes) throws E }, 1, TimeUnit.MINUTES); } + protected long getSearchableDocCount(String node) throws Exception { + final SearchResponse response = client(node).prepareSearch(indexName).setSize(0).setPreference("_only_local").get(); + return response.getHits().getTotalHits().value(); + } + protected void waitForState(Callable checkState) throws Exception { assertBusy(() -> { if (checkState.call() == false) { @@ -124,7 +136,33 @@ protected String getSettings(String indexName, String setting) { return client().admin().indices().prepareGetSettings(indexName).get().getSetting(indexName, setting); } + protected GetIngestionStateResponse getIngestionState(String indexName) throws ExecutionException, InterruptedException { + return client().admin().indices().getIngestionState(Requests.getIngestionStateRequest(indexName)).get(); + } + + protected GetIngestionStateResponse getIngestionState(String[] indexNames, int[] shards) throws ExecutionException, + InterruptedException { + return client().admin().indices().getIngestionState(Requests.getIngestionStateRequest(indexNames, shards, null)).get(); + } + + protected GetIngestionStateResponse getIngestionState(String[] indexNames, int[] shards, PageParams pageParams) + throws ExecutionException, InterruptedException { + return client().admin().indices().getIngestionState(Requests.getIngestionStateRequest(indexNames, shards, pageParams)).get(); + } + + protected PauseIngestionResponse pauseIngestion(String indexName) throws ExecutionException, InterruptedException { + return client().admin().indices().pauseIngestion(Requests.pauseIngestionRequest(indexName)).get(); + } + + protected ResumeIngestionResponse resumeIngestion(String indexName) throws ExecutionException, InterruptedException { + return client().admin().indices().resumeIngestion(Requests.resumeIngestionRequest(indexName)).get(); + } + protected void createIndexWithDefaultSettings(int numShards, int numReplicas) { + createIndexWithDefaultSettings(indexName, numShards, numReplicas); + } + + protected void createIndexWithDefaultSettings(String indexName, int numShards, int numReplicas) { createIndex( indexName, Settings.builder() @@ -142,4 +180,9 @@ protected void createIndexWithDefaultSettings(int numShards, int numReplicas) { "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" ); } + + protected void recreateKafkaTopics(int numKafkaPartitions) { + cleanup(); + setupKafka(numKafkaPartitions); + } } diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java index ad4bc2bf77071..54adeaa1396e5 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java @@ -8,7 +8,13 @@ package org.opensearch.plugin.kafka; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionResponse; +import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionResponse; +import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateResponse; 
+import org.opensearch.action.pagination.PageParams; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; @@ -19,7 +25,11 @@ import org.opensearch.transport.client.Requests; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ExecutionException; import static org.hamcrest.Matchers.is; @@ -27,6 +37,7 @@ * Integration tests for segment replication with remote store using kafka as ingestion source. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +@ThreadLeakFilters(filters = TestContainerThreadLeakFilter.class) public class RemoteStoreKafkaIT extends KafkaIngestionBaseIT { private static final String REPOSITORY_NAME = "test-remote-store-repo"; private Path absolutePath; @@ -154,6 +165,151 @@ public void testErrorStrategy() throws Exception { waitForSearchableDocs(2, Arrays.asList(node)); } + public void testPauseAndResumeIngestion() throws Exception { + // setup nodes and index + produceData("1", "name1", "24"); + produceData("2", "name2", "20"); + internalCluster().startClusterManagerOnlyNode(); + final String nodeA = internalCluster().startDataOnlyNode(); + final String nodeB = internalCluster().startDataOnlyNode(); + + createIndexWithDefaultSettings(1, 1); + ensureGreen(indexName); + waitForSearchableDocs(2, Arrays.asList(nodeA, nodeB)); + + // pause ingestion + PauseIngestionResponse pauseResponse = pauseIngestion(indexName); + assertTrue(pauseResponse.isAcknowledged()); + assertTrue(pauseResponse.isShardsAcknowledged()); + waitForState(() -> { + GetIngestionStateResponse ingestionState = getIngestionState(indexName); + return Arrays.stream(ingestionState.getShardStates()) + .allMatch(state -> state.isPollerPaused() && state.pollerState().equalsIgnoreCase("paused")); + }); + + // verify ingestion state is persisted + produceData("3", "name3", "30"); + produceData("4", "name4", "31"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeA)); + ensureYellowAndNoInitializingShards(indexName); + assertTrue(nodeB.equals(primaryNodeName(indexName))); + + final String nodeC = internalCluster().startDataOnlyNode(); + client().admin().cluster().prepareReroute().add(new AllocateReplicaAllocationCommand(indexName, 0, nodeC)).get(); + ensureGreen(indexName); + assertTrue(nodeC.equals(replicaNodeName(indexName))); + assertEquals(2, getSearchableDocCount(nodeB)); + waitForState(() -> { + GetIngestionStateResponse ingestionState = getIngestionState(indexName); + return Arrays.stream(ingestionState.getShardStates()) + .allMatch(state -> state.isPollerPaused() && state.pollerState().equalsIgnoreCase("paused")); + }); + + // resume ingestion + ResumeIngestionResponse resumeResponse = resumeIngestion(indexName); + assertTrue(resumeResponse.isAcknowledged()); + assertTrue(resumeResponse.isShardsAcknowledged()); + waitForState(() -> { + GetIngestionStateResponse ingestionState = getIngestionState(indexName); + return Arrays.stream(ingestionState.getShardStates()) + .allMatch( + state -> state.isPollerPaused() == false + && (state.pollerState().equalsIgnoreCase("polling") || state.pollerState().equalsIgnoreCase("processing")) + ); + }); + waitForSearchableDocs(4, Arrays.asList(nodeB, nodeC)); + } + + public void testDefaultGetIngestionState() throws ExecutionException, InterruptedException { + 
internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNode(); + createIndexWithDefaultSettings(1, 1); + ensureGreen(indexName); + + GetIngestionStateResponse ingestionState = getIngestionState(new String[] { indexName }, new int[] { 0 }); + assertEquals(0, ingestionState.getFailedShards()); + assertEquals(1, ingestionState.getSuccessfulShards()); + assertEquals(1, ingestionState.getTotalShards()); + assertEquals(1, ingestionState.getShardStates().length); + assertEquals(0, ingestionState.getShardStates()[0].shardId()); + assertEquals("POLLING", ingestionState.getShardStates()[0].pollerState()); + assertEquals("DROP", ingestionState.getShardStates()[0].errorPolicy()); + assertFalse(ingestionState.getShardStates()[0].isPollerPaused()); + + GetIngestionStateResponse ingestionStateForInvalidShard = getIngestionState(new String[] { indexName }, new int[] { 1 }); + assertEquals(0, ingestionStateForInvalidShard.getTotalShards()); + } + + public void testPaginatedGetIngestionState() throws ExecutionException, InterruptedException { + recreateKafkaTopics(5); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNode(); + createIndexWithDefaultSettings("index1", 5, 0); + createIndexWithDefaultSettings("index2", 5, 0); + ensureGreen("index1"); + ensureGreen("index2"); + + List ingestionStateResponseList = new ArrayList<>(); + GetIngestionStateResponse ingestionStatePage = null; + while (ingestionStatePage == null || ingestionStatePage.getNextPageToken() != null) { + String nextToken = ingestionStatePage == null ? null : ingestionStatePage.getNextPageToken(); + PageParams pageParams = new PageParams(nextToken, "asc", 3); + ingestionStatePage = getIngestionState(new String[] { "index1", "index2" }, new int[] { 0, 1, 2, 3, 4 }, pageParams); + ingestionStateResponseList.add(ingestionStatePage); + } + + // we have 2 index, each with 5 shards, total of 10 shards + // for page size of 3, we expect 4 pages in total + assertEquals(4, ingestionStateResponseList.size()); + + // validate page 1 + GetIngestionStateResponse responsePage1 = ingestionStateResponseList.get(0); + assertEquals(3, responsePage1.getTotalShards()); + assertEquals(3, responsePage1.getSuccessfulShards()); + assertEquals(3, responsePage1.getShardStates().length); + assertTrue(Arrays.stream(responsePage1.getShardStates()).allMatch(shardIngestionState -> { + boolean shardsMatch = Set.of(0, 1, 2).contains(shardIngestionState.shardId()); + boolean indexMatch = "index1".equalsIgnoreCase(shardIngestionState.index()); + return indexMatch && shardsMatch; + })); + + // validate page 2 + GetIngestionStateResponse responsePage2 = ingestionStateResponseList.get(1); + assertEquals(3, responsePage2.getTotalShards()); + assertEquals(3, responsePage2.getSuccessfulShards()); + assertEquals(3, responsePage2.getShardStates().length); + assertTrue(Arrays.stream(responsePage2.getShardStates()).allMatch(shardIngestionState -> { + boolean matchIndex1 = Set.of(3, 4).contains(shardIngestionState.shardId()) + && "index1".equalsIgnoreCase(shardIngestionState.index()); + boolean matchIndex2 = shardIngestionState.shardId() == 0 && "index2".equalsIgnoreCase(shardIngestionState.index()); + return matchIndex1 || matchIndex2; + })); + + // validate page 3 + GetIngestionStateResponse responsePage3 = ingestionStateResponseList.get(2); + assertEquals(3, responsePage3.getTotalShards()); + assertEquals(3, 
responsePage3.getSuccessfulShards()); + assertEquals(3, responsePage3.getShardStates().length); + assertTrue(Arrays.stream(responsePage3.getShardStates()).allMatch(shardIngestionState -> { + boolean shardsMatch = Set.of(1, 2, 3).contains(shardIngestionState.shardId()); + boolean indexMatch = "index2".equalsIgnoreCase(shardIngestionState.index()); + return indexMatch && shardsMatch; + })); + + // validate page 4 + GetIngestionStateResponse responsePage4 = ingestionStateResponseList.get(3); + assertEquals(1, responsePage4.getTotalShards()); + assertEquals(1, responsePage4.getSuccessfulShards()); + assertEquals(1, responsePage4.getShardStates().length); + assertTrue(Arrays.stream(responsePage4.getShardStates()).allMatch(shardIngestionState -> { + boolean shardsMatch = shardIngestionState.shardId() == 4; + boolean indexMatch = "index2".equalsIgnoreCase(shardIngestionState.index()); + return indexMatch && shardsMatch; + })); + } + private void verifyRemoteStoreEnabled(String node) { GetSettingsResponse settingsResponse = client(node).admin().indices().prepareGetSettings(indexName).get(); String remoteStoreEnabled = settingsResponse.getIndexToSettings().get(indexName).get("index.remote_store.enabled"); diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerThreadLeakFilter.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerThreadLeakFilter.java index 91e2c83ebfa48..0389676daa208 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerThreadLeakFilter.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/TestContainerThreadLeakFilter.java @@ -14,11 +14,13 @@ * The {@link org.testcontainers.images.TimeLimitedLoggedPullImageResultCallback} instance used by test containers, * for example {@link org.testcontainers.containers.KafkaContainer} creates a watcher daemon thread which is never * stopped. This filter excludes that thread from the thread leak detection logic. It also excludes ryuk resource reaper - * thread which is not closed on time. + * thread and pollers which is not closed on time. 
*/ public final class TestContainerThreadLeakFilter implements ThreadFilter { @Override public boolean reject(Thread t) { - return t.getName().startsWith("testcontainers-pull-watchdog-") || t.getName().startsWith("testcontainers-ryuk"); + return t.getName().startsWith("testcontainers-pull-watchdog-") + || t.getName().startsWith("testcontainers-ryuk") + || t.getName().startsWith("stream-poller-consumer"); } } diff --git a/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java b/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java index ea668ff8bfeec..dbaf5ae8a0f16 100644 --- a/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java +++ b/plugins/ingestion-kinesis/src/internalClusterTest/java/org/opensearch/plugin/kinesis/IngestFromKinesisIT.java @@ -132,7 +132,7 @@ public void testKinesisIngestion_RewindByOffset() throws InterruptedException { ); RangeQueryBuilder query = new RangeQueryBuilder("age").gte(0); - await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + await().atMost(60, TimeUnit.SECONDS).untilAsserted(() -> { refresh("test_rewind_by_offset"); SearchResponse response = client().prepareSearch("test_rewind_by_offset").setQuery(query).get(); assertThat(response.getHits().getTotalHits().value(), is(2L)); diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 2a8a675c6fcea..f1cc400d1a4f8 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -198,6 +198,14 @@ import org.opensearch.action.admin.indices.shrink.TransportResizeAction; import org.opensearch.action.admin.indices.stats.IndicesStatsAction; import org.opensearch.action.admin.indices.stats.TransportIndicesStatsAction; +import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionAction; +import org.opensearch.action.admin.indices.streamingingestion.pause.TransportPauseIngestionAction; +import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionAction; +import org.opensearch.action.admin.indices.streamingingestion.resume.TransportResumeIngestionAction; +import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateAction; +import org.opensearch.action.admin.indices.streamingingestion.state.TransportGetIngestionStateAction; +import org.opensearch.action.admin.indices.streamingingestion.state.TransportUpdateIngestionStateAction; +import org.opensearch.action.admin.indices.streamingingestion.state.UpdateIngestionStateAction; import org.opensearch.action.admin.indices.template.delete.DeleteComponentTemplateAction; import org.opensearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; @@ -401,6 +409,7 @@ import org.opensearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.opensearch.rest.action.admin.indices.RestGetIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestGetIndicesAction; +import org.opensearch.rest.action.admin.indices.RestGetIngestionStateAction; import org.opensearch.rest.action.admin.indices.RestGetMappingAction; import org.opensearch.rest.action.admin.indices.RestGetSettingsAction; import org.opensearch.rest.action.admin.indices.RestIndexDeleteAliasesAction; 
@@ -410,6 +419,7 @@ import org.opensearch.rest.action.admin.indices.RestIndicesShardStoresAction; import org.opensearch.rest.action.admin.indices.RestIndicesStatsAction; import org.opensearch.rest.action.admin.indices.RestOpenIndexAction; +import org.opensearch.rest.action.admin.indices.RestPauseIngestionAction; import org.opensearch.rest.action.admin.indices.RestPutComponentTemplateAction; import org.opensearch.rest.action.admin.indices.RestPutComposableIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestPutIndexTemplateAction; @@ -418,6 +428,7 @@ import org.opensearch.rest.action.admin.indices.RestRefreshAction; import org.opensearch.rest.action.admin.indices.RestResizeHandler; import org.opensearch.rest.action.admin.indices.RestResolveIndexAction; +import org.opensearch.rest.action.admin.indices.RestResumeIngestionAction; import org.opensearch.rest.action.admin.indices.RestRolloverIndexAction; import org.opensearch.rest.action.admin.indices.RestSimulateIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestSimulateTemplateAction; @@ -806,6 +817,12 @@ public void reg actions.register(GetSearchPipelineAction.INSTANCE, GetSearchPipelineTransportAction.class); actions.register(DeleteSearchPipelineAction.INSTANCE, DeleteSearchPipelineTransportAction.class); + // Pull-based ingestion actions + actions.register(PauseIngestionAction.INSTANCE, TransportPauseIngestionAction.class); + actions.register(ResumeIngestionAction.INSTANCE, TransportResumeIngestionAction.class); + actions.register(GetIngestionStateAction.INSTANCE, TransportGetIngestionStateAction.class); + actions.register(UpdateIngestionStateAction.INSTANCE, TransportUpdateIngestionStateAction.class); + return unmodifiableMap(actions.getRegistry()); } @@ -1041,6 +1058,11 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestGetDecommissionStateAction()); registerHandler.accept(new RestRemoteStoreStatsAction()); registerHandler.accept(new RestRestoreRemoteStoreAction()); + + // pull-based ingestion API + registerHandler.accept(new RestPauseIngestionAction()); + registerHandler.accept(new RestResumeIngestionAction()); + registerHandler.accept(new RestGetIngestionStateAction()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/IngestionStateShardFailure.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/IngestionStateShardFailure.java new file mode 100644 index 0000000000000..ae68b51a2c80c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/IngestionStateShardFailure.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Indicates ingestion failures at index and shard level. 
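To illustrate how these shard-level failures are meant to be consumed, here is a minimal sketch that groups a few hand-built failures by index using the groupShardFailuresByIndex helper defined further down in this class; the index names and error messages are made up for the example.

    import java.util.List;
    import java.util.Map;

    import org.opensearch.action.admin.indices.streamingingestion.IngestionStateShardFailure;

    public class ShardFailureGroupingExample {
        public static void main(String[] args) {
            IngestionStateShardFailure[] failures = new IngestionStateShardFailure[] {
                new IngestionStateShardFailure("index1", 0, "poller timed out"),   // hypothetical failure
                new IngestionStateShardFailure("index1", 1, "poller timed out"),   // hypothetical failure
                new IngestionStateShardFailure("index2", 0, "shard not started")   // hypothetical failure
            };
            Map<String, List<IngestionStateShardFailure>> byIndex =
                IngestionStateShardFailure.groupShardFailuresByIndex(failures);
            // Expected grouping: index1 -> 2 failures, index2 -> 1 failure
            byIndex.forEach((index, list) -> System.out.println(index + " -> " + list.size() + " failure(s)"));
        }
    }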
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public record IngestionStateShardFailure(String index, int shard, String errorMessage) implements Writeable, ToXContentFragment { + + private static final String SHARD = "shard"; + private static final String ERROR = "error"; + + public IngestionStateShardFailure(StreamInput in) throws IOException { + this(in.readString(), in.readVInt(), in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeVInt(shard); + out.writeString(errorMessage); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(SHARD, shard); + builder.field(ERROR, errorMessage); + return builder.endObject(); + } + + /** + * Groups provided shard ingestion state failures by index name. + */ + public static Map> groupShardFailuresByIndex(IngestionStateShardFailure[] shardFailures) { + Map> shardFailuresByIndex = new HashMap<>(); + + for (IngestionStateShardFailure shardFailure : shardFailures) { + shardFailuresByIndex.computeIfAbsent(shardFailure.index(), (index) -> new ArrayList<>()); + shardFailuresByIndex.get(shardFailure.index()).add(shardFailure); + } + + return shardFailuresByIndex; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/IngestionUpdateStateResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/IngestionUpdateStateResponse.java new file mode 100644 index 0000000000000..4ae95556d0c4f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/IngestionUpdateStateResponse.java @@ -0,0 +1,106 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion; + +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * Transport response for ingestion state updates. 
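As a rough sketch of how this response renders, the snippet below builds one by hand and prints its JSON form; the acknowledged flags, error text, and failure entry are illustrative values, and the field layout in the comment is approximate.

    import org.opensearch.action.admin.indices.streamingingestion.IngestionStateShardFailure;
    import org.opensearch.action.admin.indices.streamingingestion.IngestionUpdateStateResponse;

    public class UpdateStateResponseExample {
        public static void main(String[] args) {
            IngestionUpdateStateResponse response = new IngestionUpdateStateResponse(
                true,                                   // request acknowledged
                false,                                  // not all shards acknowledged
                new IngestionStateShardFailure[] { new IngestionStateShardFailure("logs", 0, "poller unreachable") },
                "one shard failed to update"            // illustrative error message
            );
            // toString() serializes the response to JSON, approximately:
            // {"acknowledged":true,"shards_acknowledged":false,"error":"one shard failed to update",
            //  "failures":{"logs":[{"shard":0,"error":"poller unreachable"}]}}
            System.out.println(response);
        }
    }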
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public class IngestionUpdateStateResponse extends AcknowledgedResponse { + protected static final String SHARD_ACK = "shards_acknowledged"; + protected static final String ERROR = "error"; + protected static final String FAILURES = "failures"; + + protected boolean shardsAcknowledged; + protected IngestionStateShardFailure[] shardFailuresList; + protected String errorMessage; + + public IngestionUpdateStateResponse(StreamInput in) throws IOException { + super(in); + shardFailuresList = in.readArray(IngestionStateShardFailure::new, IngestionStateShardFailure[]::new); + errorMessage = in.readString(); + shardsAcknowledged = in.readBoolean(); + } + + public IngestionUpdateStateResponse( + final boolean acknowledged, + final boolean shardsAcknowledged, + final IngestionStateShardFailure[] shardFailuresList, + String errorMessage + ) { + super(acknowledged); + this.shardFailuresList = shardFailuresList; + this.shardsAcknowledged = shardsAcknowledged; + this.errorMessage = errorMessage; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeArray(shardFailuresList); + out.writeString(errorMessage); + out.writeBoolean(shardsAcknowledged); + } + + @Override + protected void addCustomFields(final XContentBuilder builder, final Params params) throws IOException { + super.addCustomFields(builder, params); + builder.field(SHARD_ACK, shardsAcknowledged); + + if (Strings.isEmpty(errorMessage) == false) { + builder.field(ERROR, errorMessage); + } + + if (shardFailuresList.length > 0) { + Map> shardFailuresByIndex = IngestionStateShardFailure.groupShardFailuresByIndex( + shardFailuresList + ); + builder.startObject(FAILURES); + for (Map.Entry> indexShardFailures : shardFailuresByIndex.entrySet()) { + builder.startArray(indexShardFailures.getKey()); + for (IngestionStateShardFailure shardFailure : indexShardFailures.getValue()) { + shardFailure.toXContent(builder, params); + } + builder.endArray(); + } + builder.endObject(); + } + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + public boolean isShardsAcknowledged() { + return shardsAcknowledged; + } + + public IngestionStateShardFailure[] getShardFailures() { + return shardFailuresList; + } + + public String getErrorMessage() { + return errorMessage; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/package-info.java new file mode 100644 index 0000000000000..6f5fc33671130 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Streaming ingestion transport handlers. 
*/ +package org.opensearch.action.admin.indices.streamingingestion; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionAction.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionAction.java new file mode 100644 index 0000000000000..69c77ad101d81 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionAction.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.pause; + +import org.opensearch.action.ActionType; + +/** + * Transport action for pausing ingestion. + * + * @opensearch.experimental + */ +public class PauseIngestionAction extends ActionType { + + public static final PauseIngestionAction INSTANCE = new PauseIngestionAction(); + public static final String NAME = "indices:admin/ingestion/pause"; + + private PauseIngestionAction() { + super(NAME, PauseIngestionResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionRequest.java new file mode 100644 index 0000000000000..fc6e30fb01cde --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionRequest.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.pause; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.util.CollectionUtils; + +import java.io.IOException; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * A request to pause ingestion. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class PauseIngestionRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { + + private String[] indices; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + public PauseIngestionRequest(StreamInput in) throws IOException { + super(in); + this.indices = in.readStringArray(); + this.indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + public PauseIngestionRequest(String[] indices) { + this.indices = indices; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (CollectionUtils.isEmpty(indices)) { + validationException = addValidationError("index is missing", validationException); + } + return validationException; + } + + /** + * Returns the indices to be paused. + */ + @Override + public String[] indices() { + return indices; + } + + /** + * Sets the indices to be paused. 
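A minimal usage sketch for the pause API, assuming access to a NodeClient and an index named "my-index"; the validation check and listener handling are only for illustration.

    import org.opensearch.action.ActionRequestValidationException;
    import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionAction;
    import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionRequest;
    import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionResponse;
    import org.opensearch.core.action.ActionListener;
    import org.opensearch.transport.client.node.NodeClient;

    public class PauseIngestionExample {
        static void pause(NodeClient client) {
            PauseIngestionRequest request = new PauseIngestionRequest(new String[] { "my-index" });
            ActionRequestValidationException validationError = request.validate();   // null when at least one index is set
            if (validationError != null) {
                throw validationError;
            }
            client.execute(PauseIngestionAction.INSTANCE, request, new ActionListener<PauseIngestionResponse>() {
                @Override
                public void onResponse(PauseIngestionResponse response) {
                    // shards_acknowledged is true only when no shard reported a failure
                    System.out.println("acknowledged=" + response.isAcknowledged()
                        + " shardsAcknowledged=" + response.isShardsAcknowledged());
                }

                @Override
                public void onFailure(Exception e) {
                    e.printStackTrace();
                }
            });
        }
    }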
+ */ + @Override + public PauseIngestionRequest indices(String... indices) { + this.indices = indices; + return this; + } + + /** + * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. + * For example indices that don't exist. + * + * @return the desired behaviour regarding indices to ignore and wildcard indices expressions + */ + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + /** + * Specifies what type of requested indices to ignore and how to deal wild wildcard expressions. + * For example indices that don't exist. + * + * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions + * @return the request itself + */ + public PauseIngestionRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionResponse.java new file mode 100644 index 0000000000000..14b21924ba689 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionResponse.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.pause; + +import org.opensearch.action.admin.indices.streamingingestion.IngestionStateShardFailure; +import org.opensearch.action.admin.indices.streamingingestion.IngestionUpdateStateResponse; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Transport response for pausing ingestion. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class PauseIngestionResponse extends IngestionUpdateStateResponse { + + PauseIngestionResponse(StreamInput in) throws IOException { + super(in); + } + + public PauseIngestionResponse( + final boolean acknowledged, + final boolean shardsAcknowledged, + final IngestionStateShardFailure[] shardFailuresList, + String errorMessage + ) { + super(acknowledged, shardsAcknowledged, shardFailuresList, errorMessage); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/TransportPauseIngestionAction.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/TransportPauseIngestionAction.java new file mode 100644 index 0000000000000..aacf67974e5fa --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/TransportPauseIngestionAction.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.pause; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.indices.streamingingestion.IngestionStateShardFailure; +import org.opensearch.action.admin.indices.streamingingestion.state.UpdateIngestionStateRequest; +import org.opensearch.action.admin.indices.streamingingestion.state.UpdateIngestionStateResponse; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.DestructiveOperations; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.MetadataStreamingIngestionStateService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.Index; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; + +/** + * Pause ingestion transport action. + * + * @opensearch.experimental + */ +public class TransportPauseIngestionAction extends TransportClusterManagerNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportPauseIngestionAction.class); + + private final MetadataStreamingIngestionStateService ingestionStateService; + private final DestructiveOperations destructiveOperations; + + @Inject + public TransportPauseIngestionAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + MetadataStreamingIngestionStateService ingestionStateService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + DestructiveOperations destructiveOperations + ) { + super( + PauseIngestionAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + PauseIngestionRequest::new, + indexNameExpressionResolver + ); + this.ingestionStateService = ingestionStateService; + this.destructiveOperations = destructiveOperations; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected PauseIngestionResponse read(StreamInput in) throws IOException { + return new PauseIngestionResponse(in); + } + + @Override + protected void doExecute(Task task, PauseIngestionRequest request, ActionListener listener) { + destructiveOperations.failDestructive(request.indices()); + super.doExecute(task, request, listener); + } + + @Override + protected ClusterBlockException checkBlock(PauseIngestionRequest request, ClusterState state) { + return state.blocks() + .indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request)); + } + + @Override + protected void clusterManagerOperation( + final PauseIngestionRequest request, + final ClusterState state, + final ActionListener listener + ) { + throw new UnsupportedOperationException("The task parameter is required"); + } + + @Override + protected void clusterManagerOperation( + final Task task, + final PauseIngestionRequest request, + final ClusterState state, + final 
ActionListener listener + ) throws Exception { + final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + if (concreteIndices == null || concreteIndices.length == 0) { + listener.onResponse(new PauseIngestionResponse(true, false, new IngestionStateShardFailure[0], "")); + return; + } + + String[] indices = Arrays.stream(concreteIndices).map(Index::getName).toArray(String[]::new); + UpdateIngestionStateRequest updateIngestionStateRequest = new UpdateIngestionStateRequest(indices, new int[0]); + updateIngestionStateRequest.timeout(request.clusterManagerNodeTimeout()); + updateIngestionStateRequest.setIngestionPaused(true); + + ingestionStateService.updateIngestionPollerState( + "pause-ingestion", + concreteIndices, + updateIngestionStateRequest, + new ActionListener<>() { + + @Override + public void onResponse(UpdateIngestionStateResponse updateIngestionStateResponse) { + boolean shardsAcked = updateIngestionStateResponse.isAcknowledged() + && updateIngestionStateResponse.getFailedShards() == 0; + PauseIngestionResponse pauseIngestionResponse = new PauseIngestionResponse( + true, + shardsAcked, + updateIngestionStateResponse.getShardFailureList(), + updateIngestionStateResponse.getErrorMessage() + ); + listener.onResponse(pauseIngestionResponse); + } + + @Override + public void onFailure(Exception e) { + logger.debug("Error pausing ingestion", e); + listener.onFailure(e); + } + } + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/package-info.java new file mode 100644 index 0000000000000..7ef6f831d5370 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/pause/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Pause ingestion transport handlers. */ +package org.opensearch.action.admin.indices.streamingingestion.pause; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionAction.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionAction.java new file mode 100644 index 0000000000000..0f510ea7f0ec1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionAction.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.resume; + +import org.opensearch.action.ActionType; + +/** + * Transport action for resuming ingestion. 
+ * + * @opensearch.experimental + */ +public class ResumeIngestionAction extends ActionType { + + public static final ResumeIngestionAction INSTANCE = new ResumeIngestionAction(); + public static final String NAME = "indices:admin/ingestion/resume"; + + private ResumeIngestionAction() { + super(NAME, ResumeIngestionResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionRequest.java new file mode 100644 index 0000000000000..29548c86c107b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionRequest.java @@ -0,0 +1,172 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.resume; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; + +import java.io.IOException; +import java.util.Arrays; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * A request to resume ingestion. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class ResumeIngestionRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { + + private String[] indices; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + // todo: support reset settings + private final ResetSettings[] resetSettingsList; + + public ResumeIngestionRequest(StreamInput in) throws IOException { + super(in); + this.indices = in.readStringArray(); + this.indicesOptions = IndicesOptions.readIndicesOptions(in); + this.resetSettingsList = in.readArray(ResetSettings::new, ResetSettings[]::new); + } + + /** + * Constructs a new resume ingestion request. + */ + public ResumeIngestionRequest(String[] indices) { + this(indices, new ResetSettings[0]); + } + + /** + * Constructs a new resume ingestion request with reset settings. 
+ */ + public ResumeIngestionRequest(String[] indices, ResetSettings[] resetSettingsList) { + this.indices = indices; + this.resetSettingsList = resetSettingsList; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (CollectionUtils.isEmpty(indices)) { + validationException = addValidationError("index is missing", validationException); + } + + if (resetSettingsList.length > 0) { + boolean invalidResetSettingsFound = Arrays.stream(resetSettingsList) + .anyMatch( + resetSettings -> resetSettings.getShard() < 0 || resetSettings.getMode() == null || resetSettings.getValue() == null + ); + if (invalidResetSettingsFound) { + validationException = addValidationError("ResetSettings is missing either shard, mode or value", validationException); + } + + // todo: remove this when reset settings support is added + validationException = addValidationError("reset settings is currently not supported", validationException); + } + return validationException; + } + + /** + * The indices to be resumed + */ + @Override + public String[] indices() { + return indices; + } + + /** + * Sets the indices to be resumed + */ + @Override + public ResumeIngestionRequest indices(String... indices) { + this.indices = indices; + return this; + } + + /** + * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. + * For example indices that don't exist. + * + * @return the desired behaviour regarding indices to ignore and wildcard indices expressions + */ + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + /** + * Specifies what type of requested indices to ignore and how to deal wild wildcard expressions. + * For example indices that don't exist. + * + * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions + * @return the request itself + */ + public ResumeIngestionRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeArray(resetSettingsList); + } + + /** + * Represents reset settings for a given shard to be applied as part of resume operation. 
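A sketch of constructing a resume request with per-shard reset settings; note that validate() currently rejects any reset settings, so this request would not pass validation yet. The "offset" mode and "42" value are placeholders, not modes defined by this change.

    import org.opensearch.action.ActionRequestValidationException;
    import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionRequest;
    import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionRequest.ResetSettings;

    public class ResumeIngestionExample {
        public static void main(String[] args) {
            ResetSettings resetShard0 = new ResetSettings(0, "offset", "42");   // hypothetical mode and value
            ResumeIngestionRequest request = new ResumeIngestionRequest(
                new String[] { "my-index" },
                new ResetSettings[] { resetShard0 }
            );
            ActionRequestValidationException e = request.validate();
            // Currently non-null: "reset settings is currently not supported"
            System.out.println(e == null ? "valid" : e.getMessage());
        }
    }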
+ * @opensearch.experimental + */ + @ExperimentalApi + public static class ResetSettings implements Writeable { + private final int shard; + private final String mode; + private final String value; + + public ResetSettings(int shard, String mode, String value) { + this.shard = shard; + this.mode = mode; + this.value = value; + } + + public ResetSettings(StreamInput in) throws IOException { + this.shard = in.readVInt(); + this.mode = in.readString(); + this.value = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(shard); + out.writeString(mode); + out.writeString(value); + } + + public int getShard() { + return shard; + } + + public String getMode() { + return mode; + } + + public String getValue() { + return value; + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionResponse.java new file mode 100644 index 0000000000000..4d9007ee267d8 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionResponse.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.resume; + +import org.opensearch.action.admin.indices.streamingingestion.IngestionStateShardFailure; +import org.opensearch.action.admin.indices.streamingingestion.IngestionUpdateStateResponse; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Transport response for resume ingestion. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class ResumeIngestionResponse extends IngestionUpdateStateResponse { + + ResumeIngestionResponse(StreamInput in) throws IOException { + super(in); + } + + public ResumeIngestionResponse( + final boolean acknowledged, + final boolean shardsAcknowledged, + final IngestionStateShardFailure[] shardFailuresList, + String errorMessage + ) { + super(acknowledged, shardsAcknowledged, shardFailuresList, errorMessage); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/TransportResumeIngestionAction.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/TransportResumeIngestionAction.java new file mode 100644 index 0000000000000..b6ba44b9f529a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/TransportResumeIngestionAction.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.resume; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.indices.streamingingestion.IngestionStateShardFailure; +import org.opensearch.action.admin.indices.streamingingestion.state.UpdateIngestionStateRequest; +import org.opensearch.action.admin.indices.streamingingestion.state.UpdateIngestionStateResponse; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.DestructiveOperations; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.MetadataStreamingIngestionStateService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.Index; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; + +/** + * Transport action to resume ingestion. + * + * @opensearch.experimental + */ +public class TransportResumeIngestionAction extends TransportClusterManagerNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportResumeIngestionAction.class); + + private final MetadataStreamingIngestionStateService ingestionStateService; + private final DestructiveOperations destructiveOperations; + + @Inject + public TransportResumeIngestionAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + MetadataStreamingIngestionStateService ingestionStateService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + DestructiveOperations destructiveOperations + ) { + super( + ResumeIngestionAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + ResumeIngestionRequest::new, + indexNameExpressionResolver + ); + this.ingestionStateService = ingestionStateService; + this.destructiveOperations = destructiveOperations; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected ResumeIngestionResponse read(StreamInput in) throws IOException { + return new ResumeIngestionResponse(in); + } + + @Override + protected void doExecute(Task task, ResumeIngestionRequest request, ActionListener listener) { + destructiveOperations.failDestructive(request.indices()); + super.doExecute(task, request, listener); + } + + @Override + protected ClusterBlockException checkBlock(ResumeIngestionRequest request, ClusterState state) { + return state.blocks() + .indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request)); + } + + @Override + protected void clusterManagerOperation( + final ResumeIngestionRequest request, + final ClusterState state, + final ActionListener listener + ) { + throw new UnsupportedOperationException("The task parameter is required"); + } + + @Override + protected void clusterManagerOperation( + final Task task, + final ResumeIngestionRequest request, + final ClusterState 
state, + final ActionListener listener + ) throws Exception { + final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + if (concreteIndices == null || concreteIndices.length == 0) { + listener.onResponse(new ResumeIngestionResponse(true, false, new IngestionStateShardFailure[0], "")); + return; + } + + String[] indices = Arrays.stream(concreteIndices).map(Index::getName).toArray(String[]::new); + UpdateIngestionStateRequest updateIngestionStateRequest = new UpdateIngestionStateRequest(indices, new int[0]); + updateIngestionStateRequest.timeout(request.clusterManagerNodeTimeout()); + updateIngestionStateRequest.setIngestionPaused(false); + + ingestionStateService.updateIngestionPollerState( + "resume-ingestion", + concreteIndices, + updateIngestionStateRequest, + new ActionListener<>() { + + @Override + public void onResponse(UpdateIngestionStateResponse updateIngestionStateResponse) { + boolean shardsAcked = updateIngestionStateResponse.isAcknowledged() + && updateIngestionStateResponse.getFailedShards() == 0; + ResumeIngestionResponse response = new ResumeIngestionResponse( + true, + shardsAcked, + updateIngestionStateResponse.getShardFailureList(), + updateIngestionStateResponse.getErrorMessage() + ); + listener.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + logger.debug("Error resuming ingestion", e); + listener.onFailure(e); + } + } + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/package-info.java new file mode 100644 index 0000000000000..1b7e92339e08f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/resume/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Resume ingestion transport handlers. */ +package org.opensearch.action.admin.indices.streamingingestion.resume; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateAction.java new file mode 100644 index 0000000000000..11f0f7aae79bc --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateAction.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.action.ActionType; + +/** + * Transport action for getting ingestion state. 
+ * + * @opensearch.experimental + */ +public class GetIngestionStateAction extends ActionType { + + public static final GetIngestionStateAction INSTANCE = new GetIngestionStateAction(); + public static final String NAME = "indices:monitor/ingestion/state"; + + private GetIngestionStateAction() { + super(NAME, GetIngestionStateResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateRequest.java new file mode 100644 index 0000000000000..f96f681d2eebe --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateRequest.java @@ -0,0 +1,135 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.pagination.PageParams; +import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.action.ValidateActions.addValidationError; +import static org.opensearch.action.pagination.PageParams.PARAM_ASC_SORT_VALUE; + +/** + * Request to get current ingestion state when using pull-based ingestion. This request supports retrieving index and + * shard level state. By default, all shards of an index are included. 
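A small sketch of building this request for a couple of shards with explicit pagination; the index name, shard ids, and page size are arbitrary example values, and the first PageParams argument is taken to be the page token (null for the first page), mirroring how the request's defaults are constructed.

    import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateRequest;
    import org.opensearch.action.pagination.PageParams;

    public class GetIngestionStateRequestExample {
        public static void main(String[] args) {
            GetIngestionStateRequest request = new GetIngestionStateRequest(new String[] { "my-index" });
            request.setShards(new int[] { 0, 1 });   // optional: restrict to shards 0 and 1; empty means all shards
            // First page (no token yet), ascending order, at most 100 shard states per page
            request.setPageParams(new PageParams(null, PageParams.PARAM_ASC_SORT_VALUE, 100));
        }
    }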
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public class GetIngestionStateRequest extends BroadcastRequest { + public static final int DEFAULT_PAGE_SIZE = 1000; + public static final String DEFAULT_SORT_VALUE = PARAM_ASC_SORT_VALUE; + + private int[] shards; + private PageParams pageParams; + + // holds the pairs to consider when using pagination + private List indexShardPairsList; + + public GetIngestionStateRequest(String[] indices) { + super(); + this.indices = indices; + this.shards = new int[] {}; + this.pageParams = new PageParams(null, DEFAULT_SORT_VALUE, DEFAULT_PAGE_SIZE); + this.indexShardPairsList = new ArrayList<>(); + } + + public GetIngestionStateRequest(StreamInput in) throws IOException { + super(in); + this.shards = in.readVIntArray(); + this.pageParams = in.readOptionalWriteable(PageParams::new); + this.indexShardPairsList = in.readList(IndexShardPair::new); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (indices == null) { + validationException = addValidationError("index is missing", validationException); + } else if (indices.length != Arrays.stream(indices).collect(Collectors.toSet()).size()) { + validationException = addValidationError("duplicate index names provided", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVIntArray(shards); + out.writeOptionalWriteable(pageParams); + out.writeList(indexShardPairsList); + } + + public int[] getShards() { + return shards; + } + + public void setShards(int[] shards) { + this.shards = shards; + } + + public PageParams getPageParams() { + return pageParams; + } + + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + public void addIndexShardPair(String indexName, int shard) { + indexShardPairsList.add(new IndexShardPair(indexName, shard)); + } + + /** + * Returns a map of index name and respective shards to be considered. + */ + public Map> getIndexShardPairsAsMap() { + Map> indexShardMap = new HashMap<>(); + for (IndexShardPair indexShardPair : indexShardPairsList) { + indexShardMap.computeIfAbsent(indexShardPair.indexName, indexName -> new HashSet<>()).add(indexShardPair.shard); + } + + return indexShardMap; + } + + private class IndexShardPair implements Writeable { + String indexName; + int shard; + + public IndexShardPair(StreamInput in) throws IOException { + this.indexName = in.readString(); + this.shard = in.readVInt(); + } + + public IndexShardPair(String indexName, int shard) { + this.indexName = indexName; + this.shard = shard; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(indexName); + out.writeVInt(shard); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateResponse.java new file mode 100644 index 0000000000000..1de74b0a42ca0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateResponse.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Comparator; +import java.util.List; +import java.util.Map; + +/** + * Transport response for retrieving ingestion state. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class GetIngestionStateResponse extends BroadcastResponse { + private static final String INGESTION_STATE = "ingestion_state"; + private static final String NEXT_PAGE_TOKEN = "next_page_token"; + + private ShardIngestionState[] shardStates; + @Nullable + private String nextPageToken; + + public GetIngestionStateResponse(StreamInput in) throws IOException { + super(in); + shardStates = in.readArray(ShardIngestionState::new, ShardIngestionState[]::new); + nextPageToken = in.readOptionalString(); + } + + public GetIngestionStateResponse( + ShardIngestionState[] shardStates, + int totalShards, + int successfulShards, + int failedShards, + @Nullable String nextPageToken, + List shardFailures + ) { + super(totalShards, successfulShards, failedShards, shardFailures); + this.shardStates = shardStates; + this.nextPageToken = nextPageToken; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeArray(shardStates); + out.writeOptionalString(nextPageToken); + } + + @Override + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + super.addCustomXContentFields(builder, params); + if (Strings.isEmpty(nextPageToken) == false) { + builder.field(NEXT_PAGE_TOKEN, nextPageToken); + } + + Map> shardStateByIndex = ShardIngestionState.groupShardStateByIndex(shardStates); + builder.startObject(INGESTION_STATE); + + for (Map.Entry> indexShardIngestionStateEntry : shardStateByIndex.entrySet()) { + builder.startArray(indexShardIngestionStateEntry.getKey()); + indexShardIngestionStateEntry.getValue().sort(Comparator.comparingInt(ShardIngestionState::shardId)); + for (ShardIngestionState shardIngestionState : indexShardIngestionStateEntry.getValue()) { + shardIngestionState.toXContent(builder, params); + } + builder.endArray(); + } + + builder.endObject(); + } + + public ShardIngestionState[] getShardStates() { + return shardStates; + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this, true, false); + } + + public void setNextPageToken(String nextPageToken) { + this.nextPageToken = nextPageToken; + } + + public String getNextPageToken() { + return nextPageToken; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/ShardIngestionState.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/ShardIngestionState.java new file mode 100644 index 0000000000000..9ba8d57b465cb --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/ShardIngestionState.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The 
OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Represents ingestion shard state. + * + * @opensearch.experimental + */ +@ExperimentalApi +public record ShardIngestionState(String index, int shardId, String pollerState, String errorPolicy, boolean isPollerPaused) + implements + Writeable, + ToXContentFragment { + + private static final String SHARD = "shard"; + private static final String POLLER_STATE = "poller_state"; + private static final String ERROR_POLICY = "error_policy"; + private static final String POLLER_PAUSED = "poller_paused"; + + public ShardIngestionState() { + this("", -1, "", "", false); + } + + public ShardIngestionState(StreamInput in) throws IOException { + this(in.readString(), in.readVInt(), in.readOptionalString(), in.readOptionalString(), in.readBoolean()); + } + + public ShardIngestionState( + String index, + int shardId, + @Nullable String pollerState, + @Nullable String errorPolicy, + boolean isPollerPaused + ) { + this.index = index; + this.shardId = shardId; + this.pollerState = pollerState; + this.errorPolicy = errorPolicy; + this.isPollerPaused = isPollerPaused; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeVInt(shardId); + out.writeOptionalString(pollerState); + out.writeOptionalString(errorPolicy); + out.writeBoolean(isPollerPaused); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(SHARD, shardId); + builder.field(POLLER_STATE, pollerState); + builder.field(ERROR_POLICY, errorPolicy); + builder.field(POLLER_PAUSED, isPollerPaused); + builder.endObject(); + return builder; + } + + /** + * Groups list of ShardIngestionStates by the index name. 
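For reference, a tiny sketch of how one shard's state renders and how states group by index; the poller states "POLLING" and "PAUSED" and the error policy "DROP" are placeholder strings, not values defined by this change.

    import java.util.List;
    import java.util.Map;

    import org.opensearch.action.admin.indices.streamingingestion.state.ShardIngestionState;

    public class ShardIngestionStateExample {
        public static void main(String[] args) {
            ShardIngestionState shard0 = new ShardIngestionState("my-index", 0, "POLLING", "DROP", false);
            ShardIngestionState shard1 = new ShardIngestionState("my-index", 1, "PAUSED", "DROP", true);
            // Each state serializes roughly as:
            // {"shard":0,"poller_state":"POLLING","error_policy":"DROP","poller_paused":false}
            Map<String, List<ShardIngestionState>> byIndex =
                ShardIngestionState.groupShardStateByIndex(new ShardIngestionState[] { shard0, shard1 });
            System.out.println(byIndex.get("my-index").size());   // 2
        }
    }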
+ */ + public static Map> groupShardStateByIndex(ShardIngestionState[] shardIngestionStates) { + Map> shardIngestionStatesByIndex = new HashMap<>(); + + for (ShardIngestionState state : shardIngestionStates) { + shardIngestionStatesByIndex.computeIfAbsent(state.index(), (index) -> new ArrayList<>()); + shardIngestionStatesByIndex.get(state.index()).add(state); + } + + return shardIngestionStatesByIndex; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportGetIngestionStateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportGetIngestionStateAction.java new file mode 100644 index 0000000000000..388d56631b22a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportGetIngestionStateAction.java @@ -0,0 +1,225 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.OpenSearchException; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.pagination.ShardPaginationStrategy; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardsIterator; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardNotFoundException; +import org.opensearch.indices.IndicesService; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.opensearch.transport.client.node.NodeClient; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * Transport action for retrieving ingestion state. 
+ * + * @opensearch.experimental + */ +public class TransportGetIngestionStateAction extends TransportBroadcastByNodeAction< + GetIngestionStateRequest, + GetIngestionStateResponse, + ShardIngestionState> { + + private final IndicesService indicesService; + private final ClusterService clusterService; + private final NodeClient client; + + @Inject + public TransportGetIngestionStateAction( + ClusterService clusterService, + TransportService transportService, + IndicesService indicesService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + NodeClient client + ) { + super( + GetIngestionStateAction.NAME, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + GetIngestionStateRequest::new, + ThreadPool.Names.MANAGEMENT + ); + this.indicesService = indicesService; + this.clusterService = clusterService; + this.client = client; + } + + /** + * Retrieves the cluster state and identifies the (index,shard) pairs to be considered for pagination. Ingestion + * state is then retrieved the these index and shard pairs. + */ + @Override + protected void doExecute(Task task, GetIngestionStateRequest request, ActionListener listener) { + if (request.getPageParams() != null) { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.setShouldCancelOnTimeout(true); + clusterStateRequest.setParentTask(client.getLocalNodeId(), task.getId()); + clusterStateRequest.clear().indices(request.indices()).routingTable(true).metadata(true); + + client.admin().cluster().state(clusterStateRequest, new ActionListener<>() { + + @Override + public void onResponse(ClusterStateResponse clusterStateResponse) { + try { + executePaginatedGetIngestionAction(task, request, listener, clusterStateResponse); + } catch (Exception e) { + listener.onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(new OpenSearchException("Failed to retrieve cluster state", e)); + } + }); + } else { + super.doExecute(task, request, listener); + } + } + + private void executePaginatedGetIngestionAction( + Task task, + GetIngestionStateRequest request, + ActionListener listener, + ClusterStateResponse clusterStateResponse + ) { + ShardPaginationStrategy paginationStrategy = new ShardPaginationStrategy( + request.getPageParams(), + clusterStateResponse.getState(), + request.getShards() + ); + for (ShardRouting shardRouting : paginationStrategy.getRequestedEntities()) { + // add pairs to be considered for the current page + request.addIndexShardPair(shardRouting.getIndexName(), shardRouting.getId()); + } + + super.doExecute(task, request, new ActionListener<>() { + @Override + public void onResponse(GetIngestionStateResponse getIngestionStateResponse) { + getIngestionStateResponse.setNextPageToken(paginationStrategy.getResponseToken().getNextToken()); + listener.onResponse(getIngestionStateResponse); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + /** + * Indicates the shards to consider. 
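Putting the pieces together, a paginated read might look like the loop below; the client variable, index name, and page size are assumptions, and the loop is assumed to stop once no further page token is returned.

    import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateAction;
    import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateRequest;
    import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateResponse;
    import org.opensearch.action.admin.indices.streamingingestion.state.ShardIngestionState;
    import org.opensearch.action.pagination.PageParams;
    import org.opensearch.transport.client.node.NodeClient;

    public class PaginatedIngestionStateExample {
        static void printAllShardStates(NodeClient client) {
            String token = null;
            do {
                GetIngestionStateRequest request = new GetIngestionStateRequest(new String[] { "my-index" });
                request.setPageParams(new PageParams(token, PageParams.PARAM_ASC_SORT_VALUE, 100));
                GetIngestionStateResponse response = client.execute(GetIngestionStateAction.INSTANCE, request).actionGet();
                for (ShardIngestionState state : response.getShardStates()) {
                    System.out.println(state.index() + "[" + state.shardId() + "] paused=" + state.isPollerPaused());
                }
                token = response.getNextPageToken();
            } while (token != null);
        }
    }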
+ */ + @Override + protected ShardsIterator shards(ClusterState clusterState, GetIngestionStateRequest request, String[] concreteIndices) { + Set shardSet = Arrays.stream(request.getShards()).boxed().collect(Collectors.toSet()); + + // add filters for index and shard from the request + Predicate shardFilter = ShardRouting::primary; + if (shardSet.isEmpty() == false) { + shardFilter = shardFilter.and(shardRouting -> shardSet.contains(shardRouting.shardId().getId())); + } + + // add filters for index and shard for current page when pagination is enabled + Map> indexShardPairsForPage = request.getIndexShardPairsAsMap(); + if (indexShardPairsForPage.isEmpty() == false) { + shardFilter = shardFilter.and( + shardRouting -> indexShardPairsForPage.containsKey(shardRouting.getIndexName()) + && indexShardPairsForPage.get(shardRouting.getIndexName()).contains(shardRouting.getId()) + ); + } + + return clusterState.routingTable().allShardsSatisfyingPredicate(request.indices(), shardFilter); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, GetIngestionStateRequest request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, GetIngestionStateRequest request, String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, request.indices()); + } + + @Override + protected ShardIngestionState readShardResult(StreamInput in) throws IOException { + return new ShardIngestionState(in); + } + + @Override + protected GetIngestionStateResponse newResponse( + GetIngestionStateRequest request, + int totalShards, + int successfulShards, + int failedShards, + List responses, + List shardFailures, + ClusterState clusterState + ) { + return new GetIngestionStateResponse( + responses.toArray(new ShardIngestionState[0]), + totalShards, + successfulShards, + failedShards, + null, + shardFailures + ); + } + + @Override + protected GetIngestionStateRequest readRequestFrom(StreamInput in) throws IOException { + return new GetIngestionStateRequest(in); + } + + @Override + protected ShardIngestionState shardOperation(GetIngestionStateRequest request, ShardRouting shardRouting) { + IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); + if (indexShard.routingEntry() == null) { + throw new ShardNotFoundException(indexShard.shardId()); + } + + try { + return indexShard.getIngestionState(); + } catch (final AlreadyClosedException e) { + throw new ShardNotFoundException(indexShard.shardId()); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportUpdateIngestionStateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportUpdateIngestionStateAction.java new file mode 100644 index 0000000000000..5e8da1809dc6f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportUpdateIngestionStateAction.java @@ -0,0 +1,144 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.IngestionStatus; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardsIterator; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardNotFoundException; +import org.opensearch.indices.IndicesService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * Transport action for updating ingestion state on provided shards. Shard level failures are provided if there are + * errors during updating shard state. + * + *

+ * <p>This is for internal use and will not be exposed to the user directly.
+ * + * @opensearch.experimental + */ +public class TransportUpdateIngestionStateAction extends TransportBroadcastByNodeAction< + UpdateIngestionStateRequest, + UpdateIngestionStateResponse, + ShardIngestionState> { + + private final IndicesService indicesService; + + @Inject + public TransportUpdateIngestionStateAction( + ClusterService clusterService, + TransportService transportService, + IndicesService indicesService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + UpdateIngestionStateAction.NAME, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + UpdateIngestionStateRequest::new, + ThreadPool.Names.MANAGEMENT + ); + this.indicesService = indicesService; + } + + /** + * Indicates the shards to consider. + */ + @Override + protected ShardsIterator shards(ClusterState clusterState, UpdateIngestionStateRequest request, String[] concreteIndices) { + Set shardSet = Arrays.stream(request.getShards()).boxed().collect(Collectors.toSet()); + + Predicate shardFilter = ShardRouting::primary; + if (shardSet.isEmpty() == false) { + shardFilter = shardFilter.and(shardRouting -> shardSet.contains(shardRouting.shardId().getId())); + } + + return clusterState.routingTable().allShardsSatisfyingPredicate(request.getIndex(), shardFilter); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, UpdateIngestionStateRequest request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, UpdateIngestionStateRequest request, String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, request.indices()); + } + + @Override + protected ShardIngestionState readShardResult(StreamInput in) throws IOException { + return new ShardIngestionState(in); + } + + @Override + protected UpdateIngestionStateResponse newResponse( + UpdateIngestionStateRequest request, + int totalShards, + int successfulShards, + int failedShards, + List responses, + List shardFailures, + ClusterState clusterState + ) { + return new UpdateIngestionStateResponse(true, totalShards, successfulShards, failedShards, shardFailures); + } + + @Override + protected UpdateIngestionStateRequest readRequestFrom(StreamInput in) throws IOException { + return new UpdateIngestionStateRequest(in); + } + + /** + * Updates shard ingestion states depending on the requested changes. 
+ */ + @Override + protected ShardIngestionState shardOperation(UpdateIngestionStateRequest request, ShardRouting shardRouting) { + IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); + if (indexShard.routingEntry() == null) { + throw new ShardNotFoundException(indexShard.shardId()); + } + + try { + if (request.getIngestionPaused() != null) { + // update pause/resume state + indexShard.updateShardIngestionState(new IngestionStatus(request.getIngestionPaused())); + } + + return indexShard.getIngestionState(); + } catch (final AlreadyClosedException e) { + throw new ShardNotFoundException(indexShard.shardId()); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateAction.java new file mode 100644 index 0000000000000..667eda8784bd8 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateAction.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.action.ActionType; + +/** + * Transport action for updating ingestion state. + * + * @opensearch.experimental + */ +public class UpdateIngestionStateAction extends ActionType { + + public static final UpdateIngestionStateAction INSTANCE = new UpdateIngestionStateAction(); + public static final String NAME = "indices:admin/ingestion/updateState"; + + private UpdateIngestionStateAction() { + super(NAME, UpdateIngestionStateResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateRequest.java new file mode 100644 index 0000000000000..a5998695a0be6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateRequest.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * Holds metadata required for updating ingestion state. + * + *

+ * <p>This is for internal use only and will not be exposed to the user.
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public class UpdateIngestionStateRequest extends BroadcastRequest { + private String[] index; + private int[] shards; + + // Following will be optional parameters and will be used to decide when to update shard ingestion state if non-null values are provided + @Nullable + private Boolean ingestionPaused; + + public UpdateIngestionStateRequest(String[] index, int[] shards) { + super(); + this.index = index; + this.shards = shards; + } + + public UpdateIngestionStateRequest(StreamInput in) throws IOException { + super(in); + this.index = in.readStringArray(); + this.shards = in.readVIntArray(); + this.ingestionPaused = in.readOptionalBoolean(); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (index == null) { + validationException = addValidationError("index is missing", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(index); + out.writeVIntArray(shards); + out.writeOptionalBoolean(ingestionPaused); + } + + public String[] getIndex() { + return index; + } + + public int[] getShards() { + return shards; + } + + public void setShards(int[] shards) { + this.shards = shards; + } + + public Boolean getIngestionPaused() { + return ingestionPaused; + } + + public void setIngestionPaused(boolean ingestionPaused) { + this.ingestionPaused = ingestionPaused; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateResponse.java new file mode 100644 index 0000000000000..ae4f22d95a524 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateResponse.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.action.admin.indices.streamingingestion.IngestionStateShardFailure; +import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.List; + +/** + * Transport response for updating ingestion state. 
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public class UpdateIngestionStateResponse extends BroadcastResponse { + + private boolean acknowledged; + private String errorMessage = ""; + private final IngestionStateShardFailure[] shardFailureList; + + public UpdateIngestionStateResponse(StreamInput in) throws IOException { + super(in); + acknowledged = in.readBoolean(); + errorMessage = in.readString(); + shardFailureList = in.readArray(IngestionStateShardFailure::new, IngestionStateShardFailure[]::new); + } + + public UpdateIngestionStateResponse( + boolean acknowledged, + int totalShards, + int successfulShards, + int failedShards, + List shardFailures + ) { + super(totalShards, successfulShards, failedShards, shardFailures); + this.acknowledged = acknowledged; + this.shardFailureList = shardFailures.stream() + .map(shardFailure -> new IngestionStateShardFailure(shardFailure.index(), shardFailure.shardId(), shardFailure.reason())) + .toArray(IngestionStateShardFailure[]::new); + + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(acknowledged); + out.writeString(errorMessage); + out.writeArray(shardFailureList); + } + + public IngestionStateShardFailure[] getShardFailureList() { + return shardFailureList; + } + + public boolean isAcknowledged() { + return acknowledged; + } + + public String getErrorMessage() { + return errorMessage; + } + + public void setErrorMessage(String errorMessage) { + this.errorMessage = errorMessage; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/package-info.java new file mode 100644 index 0000000000000..ec3801caf1f28 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/streamingingestion/state/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Ingestion state transport handlers. 
*/ +package org.opensearch.action.admin.indices.streamingingestion.state; diff --git a/server/src/main/java/org/opensearch/action/pagination/ShardPaginationStrategy.java b/server/src/main/java/org/opensearch/action/pagination/ShardPaginationStrategy.java index 1eb364c883e60..195517481b8ec 100644 --- a/server/src/main/java/org/opensearch/action/pagination/ShardPaginationStrategy.java +++ b/server/src/main/java/org/opensearch/action/pagination/ShardPaginationStrategy.java @@ -16,10 +16,13 @@ import org.opensearch.cluster.routing.ShardRouting; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.opensearch.action.pagination.IndexPaginationStrategy.ASC_COMPARATOR; import static org.opensearch.action.pagination.IndexPaginationStrategy.DESC_COMPARATOR; @@ -37,6 +40,10 @@ public class ShardPaginationStrategy implements PaginationStrategy private PageData pageData; public ShardPaginationStrategy(PageParams pageParams, ClusterState clusterState) { + this(pageParams, clusterState, new int[0]); + } + + public ShardPaginationStrategy(PageParams pageParams, ClusterState clusterState, int[] shardIDs) { ShardStrategyToken shardStrategyToken = getShardStrategyToken(pageParams.getRequestedToken()); // Get list of indices metadata sorted by their creation time and filtered by the last sent index List filteredIndices = getEligibleIndices( @@ -50,7 +57,8 @@ public ShardPaginationStrategy(PageParams pageParams, ClusterState clusterState) filteredIndices, clusterState.getRoutingTable().getIndicesRouting(), shardStrategyToken, - pageParams.getSize() + pageParams.getSize(), + shardIDs ); } @@ -89,17 +97,19 @@ private static Predicate indexNameFilter(String lastIndexName) { /** * Will be used to get the list of shards and respective indices to which they belong, - * which are to be displayed in a page. + * which are to be displayed in a page. Optionally, if shardIDs are provided, only those will be considered. * Note: All shards for a shardID will always be present in the same page. */ private PageData getPageData( List filteredIndices, Map indicesRouting, final ShardStrategyToken token, - final int numShardsRequired + final int numShardsRequired, + int[] shardIDs ) { List shardRoutings = new ArrayList<>(); List indices = new ArrayList<>(); + Set shardIdSet = Arrays.stream(shardIDs).boxed().collect(Collectors.toSet()); int shardCount = 0; IndexMetadata lastAddedIndex = null; @@ -108,10 +118,14 @@ private PageData getPageData( String indexName = indexMetadata.getIndex().getName(); boolean indexShardsAdded = false; // Always start from shardID 0 for all indices except for the first one which might be same as the last sent - // index. To identify if an index is same as last sent index, verify both the index name and creaton time. + // index. To identify if an index is same as last sent index, verify both the index name and creation time. int startShardId = shardCount == 0 ? 
getStartShardIdForPageIndex(token, indexName, indexMetadata.getCreationDate()) : 0; Map indexShardRoutingTable = indicesRouting.get(indexName).getShards(); for (; startShardId < indexShardRoutingTable.size(); startShardId++) { + if (shardIdSet.isEmpty() == false && shardIdSet.contains(startShardId) == false) { + continue; + } + if (indexShardRoutingTable.get(startShardId).size() > numShardsRequired) { throw new IllegalArgumentException( "size value should be greater than the replica count of all indices, so that all primary and replicas of a shard show up in single page" diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index f70f136fc9a54..72afd44eadef8 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -874,6 +874,7 @@ public Iterator> settings() { private final boolean isAppendOnlyIndex; private final Context context; + private final IngestionStatus ingestionStatus; private IndexMetadata( final Index index, @@ -905,7 +906,8 @@ private IndexMetadata( final int indexTotalShardsPerNodeLimit, final int indexTotalPrimaryShardsPerNodeLimit, boolean isAppendOnlyIndex, - final Context context + final Context context, + final IngestionStatus ingestionStatus ) { this.index = index; @@ -945,6 +947,7 @@ private IndexMetadata( this.indexTotalPrimaryShardsPerNodeLimit = indexTotalPrimaryShardsPerNodeLimit; this.isAppendOnlyIndex = isAppendOnlyIndex; this.context = context; + this.ingestionStatus = ingestionStatus; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -1031,6 +1034,10 @@ public boolean useIngestionSource() { return ingestionSourceType != null && !(NONE_INGESTION_SOURCE_TYPE.equals(ingestionSourceType)); } + public IngestionStatus getIngestionStatus() { + return ingestionStatus; + } + /** * Return the {@link Version} on which this index has been upgraded. This * information is typically useful for backward compatibility. 
@@ -1216,6 +1223,9 @@ public boolean equals(Object o) { if (!Objects.equals(context, that.context)) { return false; } + if (Objects.equals(ingestionStatus, that.ingestionStatus) == false) { + return false; + } return true; } @@ -1235,6 +1245,7 @@ public int hashCode() { result = 31 * result + rolloverInfos.hashCode(); result = 31 * result + Boolean.hashCode(isSystem); result = 31 * result + Objects.hashCode(context); + result = 31 * result + Objects.hashCode(ingestionStatus); return result; } @@ -1280,6 +1291,7 @@ static class IndexMetadataDiff implements Diff { private final Diff> rolloverInfos; private final boolean isSystem; private final Context context; + private final IngestionStatus ingestionStatus; IndexMetadataDiff(IndexMetadata before, IndexMetadata after) { index = after.index.getName(); @@ -1303,6 +1315,7 @@ static class IndexMetadataDiff implements Diff { rolloverInfos = DiffableUtils.diff(before.rolloverInfos, after.rolloverInfos, DiffableUtils.getStringKeySerializer()); isSystem = after.isSystem; context = after.context; + ingestionStatus = after.ingestionStatus; } private static final DiffableUtils.DiffableValueReader ALIAS_METADATA_DIFF_VALUE_READER = @@ -1339,6 +1352,11 @@ static class IndexMetadataDiff implements Diff { } else { context = null; } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + ingestionStatus = in.readOptionalWriteable(IngestionStatus::new); + } else { + ingestionStatus = null; + } } @Override @@ -1361,6 +1379,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_17_0)) { out.writeOptionalWriteable(context); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(ingestionStatus); + } } @Override @@ -1381,6 +1402,7 @@ public IndexMetadata apply(IndexMetadata part) { builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos)); builder.system(isSystem); builder.context(context); + builder.ingestionStatus(ingestionStatus); // TODO: support ingestion source return builder.build(); } @@ -1427,6 +1449,10 @@ public static IndexMetadata readFrom(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_17_0)) { builder.context(in.readOptionalWriteable(Context::new)); } + + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + builder.ingestionStatus(in.readOptionalWriteable(IngestionStatus::new)); + } return builder.build(); } @@ -1468,6 +1494,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_17_0)) { out.writeOptionalWriteable(context); } + + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(ingestionStatus); + } } @Override @@ -1530,6 +1560,8 @@ public String toString() { .append(isSystem) .append(", context=") .append(context) + .append(", ingestionStatus=") + .append(ingestionStatus) .append("}") .toString(); } @@ -1578,6 +1610,7 @@ public static class Builder { private Integer routingNumShards; private boolean isSystem; private Context context; + private IngestionStatus ingestionStatus; public Builder(String index) { this.index = index; @@ -1606,6 +1639,7 @@ public Builder(IndexMetadata indexMetadata) { this.rolloverInfos = new HashMap<>(indexMetadata.rolloverInfos); this.isSystem = indexMetadata.isSystem; this.context = indexMetadata.context; + this.ingestionStatus = indexMetadata.ingestionStatus; } public Builder index(String index) { @@ -1836,6 +1870,15 @@ public Context context() { return context; } + public Builder ingestionStatus(IngestionStatus 
ingestionStatus) { + this.ingestionStatus = ingestionStatus; + return this; + } + + public IngestionStatus getIngestionStatus() { + return ingestionStatus; + } + public IndexMetadata build() { final Map tmpAliases = aliases; Settings tmpSettings = settings; @@ -1943,6 +1986,10 @@ public IndexMetadata build() { final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); + if (ingestionStatus == null) { + ingestionStatus = IngestionStatus.getDefaultValue(); + } + return new IndexMetadata( new Index(index, uuid), version, @@ -1973,7 +2020,8 @@ public IndexMetadata build() { indexTotalShardsPerNodeLimit, indexTotalPrimaryShardsPerNodeLimit, isAppendOnlyIndex, - context + context, + ingestionStatus ); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IngestionStatus.java b/server/src/main/java/org/opensearch/cluster/metadata/IngestionStatus.java new file mode 100644 index 0000000000000..4a78d8eadfaf7 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/IngestionStatus.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Indicates pull-based ingestion status. + */ +@ExperimentalApi +public record IngestionStatus(boolean isPaused) implements Writeable { + + public IngestionStatus(StreamInput in) throws IOException { + this(in.readBoolean()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(isPaused); + } + + public static IngestionStatus getDefaultValue() { + return new IngestionStatus(false); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataStreamingIngestionStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataStreamingIngestionStateService.java new file mode 100644 index 0000000000000..70e5abe81cbee --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataStreamingIngestionStateService.java @@ -0,0 +1,170 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.OpenSearchException; +import org.opensearch.action.admin.indices.streamingingestion.state.TransportUpdateIngestionStateAction; +import org.opensearch.action.admin.indices.streamingingestion.state.UpdateIngestionStateRequest; +import org.opensearch.action.admin.indices.streamingingestion.state.UpdateIngestionStateResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; + +import java.util.Collections; + +/** + * Service responsible for submitting metadata updates (for example, ingestion pause/resume state change updates). + * + * @opensearch.experimental + */ +public class MetadataStreamingIngestionStateService { + private static final Logger logger = LogManager.getLogger(MetadataStreamingIngestionStateService.class); + + private final ClusterService clusterService; + private final TransportUpdateIngestionStateAction transportUpdateIngestionStateAction; + + @Inject + public MetadataStreamingIngestionStateService( + ClusterService clusterService, + TransportUpdateIngestionStateAction transportUpdateIngestionStateAction + ) { + this.clusterService = clusterService; + this.transportUpdateIngestionStateAction = transportUpdateIngestionStateAction; + } + + /** + * This method updates the ingestion poller state in two phases for provided index shards. + *
    + *
+ * <ul>
+ * <li>Phase 1: Publishes cluster state update to pause/resume ingestion. This phase finishes once the update is acknowledged.</li>
+ * <li>Phase 2: Runs transport action to update cluster state on individual shards and collects success/failure responses.</li>
+ * </ul>
+ * <p>
The two phase approach is taken in order to give real time feedback to the user if the ingestion update was a success or failure. + * Note that the second phase could be a no-op if the shard already processed the cluster state update. + */ + public void updateIngestionPollerState( + String source, + Index[] concreteIndices, + UpdateIngestionStateRequest request, + ActionListener listener + ) { + if (concreteIndices == null || concreteIndices.length == 0) { + throw new IllegalArgumentException("Index is missing"); + } + + if (request.getIngestionPaused() == null) { + throw new IllegalArgumentException("Ingestion poller target state is missing"); + } + + clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask(Priority.URGENT) { + + @Override + public ClusterState execute(ClusterState currentState) { + return getUpdatedIngestionPausedClusterState(concreteIndices, currentState, request.getIngestionPaused()); + } + + @Override + public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { + if (oldState == newState) { + logger.debug("Cluster state did not change when trying to set ingestionPaused={}", request.getIngestionPaused()); + listener.onResponse(new UpdateIngestionStateResponse(false, 0, 0, 0, Collections.emptyList())); + } else { + // todo: should we run this on a different thread? + processUpdateIngestionRequestOnShards(request, new ActionListener<>() { + + @Override + public void onResponse(UpdateIngestionStateResponse updateIngestionStateResponse) { + listener.onResponse(updateIngestionStateResponse); + } + + @Override + public void onFailure(Exception e) { + UpdateIngestionStateResponse response = new UpdateIngestionStateResponse( + true, + 0, + 0, + 0, + Collections.emptyList() + ); + response.setErrorMessage("Error encountered while verifying ingestion poller state: " + e.getMessage()); + listener.onResponse(response); + } + }); + } + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure( + new OpenSearchException( + "Ingestion cluster state update failed to set ingestionPaused={}", + request.getIngestionPaused(), + e + ) + ); + } + + @Override + public TimeValue timeout() { + return request.timeout(); + } + }); + } + + /** + * Executes transport action to update ingestion state on provided index shards. + */ + public void processUpdateIngestionRequestOnShards( + UpdateIngestionStateRequest updateIngestionStateRequest, + ActionListener listener + ) { + transportUpdateIngestionStateAction.execute(updateIngestionStateRequest, listener); + } + + /** + * Updates ingestionPaused value in provided cluster state. 
+ */ + private ClusterState getUpdatedIngestionPausedClusterState( + final Index[] indices, + final ClusterState currentState, + boolean ingestionPaused + ) { + final Metadata.Builder metadata = Metadata.builder(currentState.metadata()); + + for (Index index : indices) { + final IndexMetadata indexMetadata = metadata.getSafe(index); + + if (indexMetadata.useIngestionSource() == false) { + logger.debug("Pause/resume request will be ignored for index {} as streaming ingestion is not enabled", index); + } + + if (indexMetadata.getIngestionStatus().isPaused() != ingestionPaused) { + IngestionStatus updatedIngestionStatus = new IngestionStatus(ingestionPaused); + final IndexMetadata.Builder updatedMetadata = IndexMetadata.builder(indexMetadata).ingestionStatus(updatedIngestionStatus); + metadata.put(updatedMetadata); + } else { + logger.debug( + "Received request for ingestionPaused:{} for index {}. The state is already ingestionPaused:{}", + ingestionPaused, + index, + ingestionPaused + ); + } + } + + return ClusterState.builder(currentState).metadata(metadata).build(); + } +} diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index 23c49d359b25e..1d5d104394558 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -12,6 +12,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.search.IndexSearcher; import org.opensearch.ExceptionsHelper; +import org.opensearch.action.admin.indices.streamingingestion.state.ShardIngestionState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IngestionSource; import org.opensearch.common.lucene.Lucene; @@ -105,6 +106,9 @@ public void start() { ingestionSource.getType() ); + StreamPoller.State initialPollerState = indexMetadata.getIngestionStatus().isPaused() + ? StreamPoller.State.PAUSED + : StreamPoller.State.NONE; streamPoller = new DefaultStreamPoller( startPointer, persistedPointers, @@ -112,7 +116,8 @@ public void start() { this, resetState, resetValue, - ingestionErrorStrategy + ingestionErrorStrategy, + initialPollerState ); streamPoller.start(); } @@ -329,4 +334,31 @@ private void updateErrorHandlingStrategy(IngestionErrorStrategy.ErrorStrategy er ); streamPoller.updateErrorStrategy(updatedIngestionErrorStrategy); } + + /** + * Pause the poller. Used by management flows. + */ + public void pauseIngestion() { + streamPoller.pause(); + } + + /** + * Resumes the poller. Used by management flows. + */ + public void resumeIngestion() { + streamPoller.resume(); + } + + /** + * Get current ingestion state. Used by management flows. 
+ */ + public ShardIngestionState getIngestionState() { + return new ShardIngestionState( + engineConfig.getIndexSettings().getIndex().getName(), + engineConfig.getShardId().getId(), + streamPoller.getState().toString(), + streamPoller.getErrorStrategy().getName(), + streamPoller.isPaused() + ); + } } diff --git a/server/src/main/java/org/opensearch/index/shard/AbstractIndexShardComponent.java b/server/src/main/java/org/opensearch/index/shard/AbstractIndexShardComponent.java index 5e973ff9acb8f..e5374f964649d 100644 --- a/server/src/main/java/org/opensearch/index/shard/AbstractIndexShardComponent.java +++ b/server/src/main/java/org/opensearch/index/shard/AbstractIndexShardComponent.java @@ -33,6 +33,7 @@ package org.opensearch.index.shard; import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.indices.streamingingestion.state.ShardIngestionState; import org.opensearch.common.logging.Loggers; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; @@ -63,4 +64,8 @@ public ShardId shardId() { public IndexSettings indexSettings() { return indexSettings; } + + public ShardIngestionState getIngestionState() { + return new ShardIngestionState(); + } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 5174a179cdc6b..b0a5212b1d330 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -61,11 +61,13 @@ import org.opensearch.action.ActionRunnable; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.opensearch.action.admin.indices.streamingingestion.state.ShardIngestionState; import org.opensearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IngestionStatus; import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; @@ -126,6 +128,7 @@ import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineException; import org.opensearch.index.engine.EngineFactory; +import org.opensearch.index.engine.IngestionEngine; import org.opensearch.index.engine.NRTReplicationEngine; import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.engine.RefreshFailedEngineException; @@ -5437,4 +5440,48 @@ static ShardMigrationState getShardMigrationState(IndexSettings indexSettings, b } return ShardMigrationState.DOCREP_NON_MIGRATING; } + + /** + * Updates the ingestion state based on the received index metadata. + */ + @Override + public void updateShardIngestionState(IndexMetadata indexMetadata) { + if (indexMetadata.useIngestionSource() == false) { + return; + } + + updateShardIngestionState(indexMetadata.getIngestionStatus()); + } + + /** + * Updates the ingestion state by delegating to the ingestion engine. 
+ */ + public void updateShardIngestionState(IngestionStatus ingestionStatus) { + synchronized (engineMutex) { + if (getEngineOrNull() instanceof IngestionEngine == false) { + return; + } + + IngestionEngine ingestionEngine = (IngestionEngine) getEngineOrNull(); + if (ingestionStatus.isPaused()) { + ingestionEngine.pauseIngestion(); + } else { + ingestionEngine.resumeIngestion(); + } + } + } + + /** + * Returns the current ingestion state for the shard. + */ + @Override + public ShardIngestionState getIngestionState() { + Engine engine = getEngineOrNull(); + if (indexSettings.getIndexMetadata().useIngestionSource() == false || engine instanceof IngestionEngine == false) { + throw new OpenSearchException("Unable to retrieve ingestion state as the shard does not have ingestion enabled."); + } + + IngestionEngine ingestionEngine = (IngestionEngine) engine; + return ingestionEngine.getIngestionState(); + } } diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 2c3ffcdd9e0ba..d35ae46442fa3 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -718,6 +718,8 @@ private void updateShard( indexShardRoutingTable, nodes ); + updateShardIngestionState(shard, indexMetadata, shardRouting); + } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed updating shard routing entry", e, clusterState); return; @@ -752,6 +754,20 @@ private void updateShard( } } + /** + * Update the ingestion state for the shard using the new metadata. + */ + private void updateShardIngestionState(Shard shard, IndexMetadata indexMetadata, ShardRouting shardRouting) { + try { + if (indexMetadata.useIngestionSource() && shardRouting.primary()) { + shard.updateShardIngestionState(indexMetadata); + } + } catch (Exception e) { + logger.error("Failed to update shard ingestion state", e); + throw e; + } + } + /** * Finds the routing source node for peer recovery, return null if its not found. Note, this method expects the shard * routing to *require* peer recovery, use {@link ShardRouting#recoverySource()} to check if its needed or not. 
@@ -927,6 +943,8 @@ void updateShardState( IndexShardRoutingTable routingTable, DiscoveryNodes discoveryNodes ) throws IOException; + + default void updateShardIngestionState(IndexMetadata indexMetadata) {}; } /** diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java index e342c48632494..11713ff661e9e 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/BlockIngestionErrorStrategy.java @@ -16,6 +16,7 @@ */ public class BlockIngestionErrorStrategy implements IngestionErrorStrategy { private static final Logger logger = LogManager.getLogger(BlockIngestionErrorStrategy.class); + private static final String NAME = "BLOCK"; private final String ingestionSource; public BlockIngestionErrorStrategy(String ingestionSource) { @@ -33,4 +34,9 @@ public void handleError(Throwable e, ErrorStage stage) { public boolean shouldIgnoreError(Throwable e, ErrorStage stage) { return false; } + + @Override + public String getName() { + return NAME; + } } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java index 351a45b804dff..e1a4f7d3b4b7d 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -76,7 +76,8 @@ public DefaultStreamPoller( IngestionEngine ingestionEngine, ResetState resetState, String resetValue, - IngestionErrorStrategy errorStrategy + IngestionErrorStrategy errorStrategy, + State initialState ) { this( startPointer, @@ -85,7 +86,8 @@ public DefaultStreamPoller( new MessageProcessorRunnable(new ArrayBlockingQueue<>(100), ingestionEngine, errorStrategy), resetState, resetValue, - errorStrategy + errorStrategy, + initialState ); } @@ -96,12 +98,14 @@ public DefaultStreamPoller( MessageProcessorRunnable processorRunnable, ResetState resetState, String resetValue, - IngestionErrorStrategy errorStrategy + IngestionErrorStrategy errorStrategy, + State initialState ) { this.consumer = Objects.requireNonNull(consumer); this.resetState = resetState; this.resetValue = resetValue; - batchStartPointer = startPointer; + this.batchStartPointer = startPointer; + this.state = initialState; this.persistedPointers = persistedPointers; if (!this.persistedPointers.isEmpty()) { maxPersistedPointer = this.persistedPointers.stream().max(IngestionShardPointer::compareTo).get(); @@ -130,6 +134,11 @@ public void start() { if (closed) { throw new RuntimeException("poller is closed!"); } + + if (started) { + throw new RuntimeException("poller is already running"); + } + started = true; // when we start, we need to include the batch start pointer in the read for the first read includeBatchStartPointer = true; @@ -333,7 +342,7 @@ public PollingIngestStats getStats() { } public State getState() { - return state; + return this.state; } @Override diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java index 610718d816230..7773cfe7a037d 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java +++ 
b/server/src/main/java/org/opensearch/indices/pollingingest/DropIngestionErrorStrategy.java @@ -16,6 +16,7 @@ */ public class DropIngestionErrorStrategy implements IngestionErrorStrategy { private static final Logger logger = LogManager.getLogger(DropIngestionErrorStrategy.class); + private static final String NAME = "DROP"; private final String ingestionSource; public DropIngestionErrorStrategy(String ingestionSource) { @@ -34,4 +35,8 @@ public boolean shouldIgnoreError(Throwable e, ErrorStage stage) { return true; } + @Override + public String getName() { + return NAME; + } } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java index 930fb69e1534f..ebacbb0d4e9d9 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionErrorStrategy.java @@ -39,6 +39,11 @@ static IngestionErrorStrategy create(ErrorStrategy errorStrategy, String ingesti } } + /** + * Returns the name of the error policy. + */ + String getName(); + /** * Indicates available error handling strategies */ diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java index 81e2bddfa687b..16e7c06a49b72 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java @@ -54,6 +54,8 @@ public interface StreamPoller extends Closeable { IngestionErrorStrategy getErrorStrategy(); + State getState(); + /** * Update the error strategy for the poller. */ diff --git a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java index 99f81c41d66ee..b4484939e1137 100644 --- a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java @@ -37,6 +37,7 @@ import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.opensearch.OpenSearchParseException; +import org.opensearch.action.pagination.PageParams; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.annotation.ExperimentalApi; @@ -65,6 +66,9 @@ import java.util.concurrent.atomic.LongAdder; import java.util.stream.Collectors; +import static org.opensearch.action.pagination.PageParams.PARAM_ASC_SORT_VALUE; +import static org.opensearch.action.pagination.PageParams.PARAM_DESC_SORT_VALUE; + /** * Base handler for REST requests. *

@@ -348,4 +352,21 @@ public RestChannelConsumer sendTask(String nodeId, Task task) { } }; } + + /** + * Validate and return the page params required for pagination. + */ + protected PageParams validateAndGetPageParams(RestRequest restRequest, String defaultSortValue, int defaultPageSize) { + PageParams pageParams = restRequest.parsePaginatedQueryParams(defaultSortValue, defaultPageSize); + + // validating pageSize + if (pageParams.getSize() <= 0) { + throw new IllegalArgumentException("size must be greater than zero"); + } + // Validating sort order + if ((PARAM_ASC_SORT_VALUE.equals(pageParams.getSort()) || PARAM_DESC_SORT_VALUE.equals(pageParams.getSort())) == false) { + throw new IllegalArgumentException("value of sort can either be asc or desc"); + } + return pageParams; + } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIngestionStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIngestionStateAction.java new file mode 100644 index 0000000000000..651fc90d67ec3 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIngestionStateAction.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.indices; + +import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateRequest; +import org.opensearch.action.pagination.PageParams; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.Strings; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.transport.client.node.NodeClient; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateRequest.DEFAULT_PAGE_SIZE; +import static org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateRequest.DEFAULT_SORT_VALUE; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Transport action to get ingestion state. This API supports pagination. 
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public class RestGetIngestionStateAction extends BaseRestHandler { + @Override + public List routes() { + return unmodifiableList(asList(new Route(GET, "/{index}/ingestion/_state"))); + } + + @Override + public String getName() { + return "get_ingestion_state_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + GetIngestionStateRequest getIngestionStateRequest = new GetIngestionStateRequest( + Strings.splitStringByCommaToArray(request.param("index")) + ); + + if (request.hasParam("shards")) { + int[] shards = Arrays.stream(request.paramAsStringArrayOrEmptyIfAll("shards")) + .mapToInt(Integer::parseInt) // Convert each string to int + .toArray(); + getIngestionStateRequest.setShards(shards); + } + getIngestionStateRequest.timeout(request.paramAsTime("timeout", getIngestionStateRequest.timeout())); + getIngestionStateRequest.indicesOptions(IndicesOptions.fromRequest(request, getIngestionStateRequest.indicesOptions())); + + PageParams pageParams = validateAndGetPageParams(request, DEFAULT_SORT_VALUE, DEFAULT_PAGE_SIZE); + getIngestionStateRequest.setPageParams(pageParams); + return channel -> client.admin().indices().getIngestionState(getIngestionStateRequest, new RestToXContentListener<>(channel)); + } + + @Override + public boolean isActionPaginated() { + return true; + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPauseIngestionAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPauseIngestionAction.java new file mode 100644 index 0000000000000..30eb472459268 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPauseIngestionAction.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.indices; + +import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.common.Strings; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.transport.client.node.NodeClient; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Transport action to pause pull-based ingestion. 
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public class RestPauseIngestionAction extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestPauseIngestionAction.class); + + @Override + public List routes() { + return unmodifiableList(asList(new Route(POST, "/{index}/ingestion/_pause"))); + } + + @Override + public String getName() { + return "pause_ingestion_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + PauseIngestionRequest pauseIngestionRequest = new PauseIngestionRequest(Strings.splitStringByCommaToArray(request.param("index"))); + pauseIngestionRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", pauseIngestionRequest.clusterManagerNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(pauseIngestionRequest, request, deprecationLogger, getName()); + pauseIngestionRequest.timeout(request.paramAsTime("timeout", pauseIngestionRequest.timeout())); + pauseIngestionRequest.indicesOptions(IndicesOptions.fromRequest(request, pauseIngestionRequest.indicesOptions())); + + return channel -> client.admin().indices().pauseIngestion(pauseIngestionRequest, new RestToXContentListener<>(channel)); + } + +} diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResumeIngestionAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResumeIngestionAction.java new file mode 100644 index 0000000000000..b136b41949b09 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResumeIngestionAction.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.indices; + +import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.Strings; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.transport.client.node.NodeClient; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Transport action to resume pull-based ingestion. 
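The routes registered by these handlers can be exercised over HTTP with the low-level REST client. A rough sketch, assuming a configured RestClient instance (the client itself is not part of this patch); only the endpoint paths come from the routes above.

// Hypothetical sketch, not part of this patch: hitting the new ingestion endpoints over HTTP.
import java.io.IOException;

import org.opensearch.client.Request;
import org.opensearch.client.Response;
import org.opensearch.client.RestClient;

final class IngestionRestSketch {
    private IngestionRestSketch() {}

    static void pauseResumeAndInspect(RestClient restClient, String index) throws IOException {
        // Pause, resume, then read back per-shard poller state; each response body is JSON.
        Response pause = restClient.performRequest(new Request("POST", "/" + index + "/ingestion/_pause"));
        Response resume = restClient.performRequest(new Request("POST", "/" + index + "/ingestion/_resume"));
        Response state = restClient.performRequest(new Request("GET", "/" + index + "/ingestion/_state"));
    }
}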
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public class RestResumeIngestionAction extends BaseRestHandler { + + @Override + public List routes() { + return unmodifiableList(asList(new Route(POST, "/{index}/ingestion/_resume"))); + } + + @Override + public String getName() { + return "resume_ingestion_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + ResumeIngestionRequest resumeIngestionRequest = new ResumeIngestionRequest( + Strings.splitStringByCommaToArray(request.param("index")) + ); + resumeIngestionRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", resumeIngestionRequest.clusterManagerNodeTimeout()) + ); + resumeIngestionRequest.timeout(request.paramAsTime("timeout", resumeIngestionRequest.timeout())); + resumeIngestionRequest.indicesOptions(IndicesOptions.fromRequest(request, resumeIngestionRequest.indicesOptions())); + + return channel -> client.admin().indices().resumeIngestion(resumeIngestionRequest, new RestToXContentListener<>(channel)); + } + +} diff --git a/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java index 122e1be889195..2beec71785d91 100644 --- a/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java @@ -109,6 +109,12 @@ import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionRequest; +import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionResponse; +import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionRequest; +import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionResponse; +import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateRequest; +import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateResponse; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesRequest; @@ -865,4 +871,22 @@ public interface IndicesAdminClient extends OpenSearchClient { /** Update a view */ ActionFuture updateView(CreateViewAction.Request request); + + /** Pause ingestion */ + ActionFuture pauseIngestion(PauseIngestionRequest request); + + /** Pause ingestion */ + void pauseIngestion(PauseIngestionRequest request, ActionListener listener); + + /** Resume ingestion */ + ActionFuture resumeIngestion(ResumeIngestionRequest request); + + /** Resume ingestion */ + void resumeIngestion(ResumeIngestionRequest request, ActionListener listener); + + /** Get ingestion state */ + ActionFuture getIngestionState(GetIngestionStateRequest request); + + /** Get ingestion state */ + void getIngestionState(GetIngestionStateRequest request, ActionListener listener); } diff --git a/server/src/main/java/org/opensearch/transport/client/Requests.java b/server/src/main/java/org/opensearch/transport/client/Requests.java index 123f803b1f5ed..b2a7384d20b9c 100644 --- 
a/server/src/main/java/org/opensearch/transport/client/Requests.java +++ b/server/src/main/java/org/opensearch/transport/client/Requests.java @@ -74,11 +74,15 @@ import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionRequest; +import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionRequest; +import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateRequest; import org.opensearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.get.GetRequest; import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.pagination.PageParams; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; import org.opensearch.common.xcontent.XContentType; @@ -608,4 +612,35 @@ public static GetDecommissionStateRequest getDecommissionStateRequest() { public static DeleteDecommissionStateRequest deleteDecommissionStateRequest() { return new DeleteDecommissionStateRequest(); } + + /** + * Creates a pause ingestion request given list of index names. + */ + public static PauseIngestionRequest pauseIngestionRequest(String... index) { + return new PauseIngestionRequest(index); + } + + /** + * Creates a resume ingestion request given list of index names. + */ + public static ResumeIngestionRequest resumeIngestionRequest(String... index) { + return new ResumeIngestionRequest(index); + } + + /** + * Creates a get ingestion state request given an index. + */ + public static GetIngestionStateRequest getIngestionStateRequest(String index) { + return new GetIngestionStateRequest(new String[] { index }); + } + + /** + * Creates a get ingestion state request given list of indices. 
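A short end-to-end sketch of how these request builders combine with the new IndicesAdminClient methods: pause an index, then read back the per-shard poller state. This is illustrative only; the blocking actionGet() calls and the Client import location are assumptions, and error handling is omitted.

// Hypothetical sketch, not part of this patch: pause ingestion and verify the resulting state
// using the Requests helpers and client methods introduced in this change.
import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateResponse;
import org.opensearch.transport.client.Client;
import org.opensearch.transport.client.Requests;

final class PauseAndVerifySketch {
    private PauseAndVerifySketch() {}

    static GetIngestionStateResponse pauseAndFetchState(Client client, String index) {
        // Pause first; the response carries acknowledgement and any shard-level failures.
        client.admin().indices().pauseIngestion(Requests.pauseIngestionRequest(index)).actionGet();
        // Each primary shard of the index is then expected to report a paused poller state.
        return client.admin().indices().getIngestionState(Requests.getIngestionStateRequest(index)).actionGet();
    }
}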
+ */ + public static GetIngestionStateRequest getIngestionStateRequest(String[] indices, int[] shards, PageParams pageParams) { + GetIngestionStateRequest request = new GetIngestionStateRequest(indices); + request.setShards(shards); + request.setPageParams(pageParams); + return request; + } } diff --git a/server/src/main/java/org/opensearch/transport/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/transport/client/support/AbstractClient.java index a84e11ef86bcd..317613fd2a86e 100644 --- a/server/src/main/java/org/opensearch/transport/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/transport/client/support/AbstractClient.java @@ -293,6 +293,15 @@ import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionAction; +import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionRequest; +import org.opensearch.action.admin.indices.streamingingestion.pause.PauseIngestionResponse; +import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionAction; +import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionRequest; +import org.opensearch.action.admin.indices.streamingingestion.resume.ResumeIngestionResponse; +import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateAction; +import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateRequest; +import org.opensearch.action.admin.indices.streamingingestion.state.GetIngestionStateResponse; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; @@ -2145,6 +2154,42 @@ public void updateView(CreateViewAction.Request request, ActionListener updateView(CreateViewAction.Request request) { return execute(UpdateViewAction.INSTANCE, request); } + + /** Pause ingestion */ + @Override + public ActionFuture pauseIngestion(final PauseIngestionRequest request) { + return execute(PauseIngestionAction.INSTANCE, request); + } + + /** Pause ingestion */ + @Override + public void pauseIngestion(final PauseIngestionRequest request, final ActionListener listener) { + execute(PauseIngestionAction.INSTANCE, request, listener); + } + + /** Resume ingestion */ + @Override + public ActionFuture resumeIngestion(final ResumeIngestionRequest request) { + return execute(ResumeIngestionAction.INSTANCE, request); + } + + /** Resume ingestion */ + @Override + public void resumeIngestion(final ResumeIngestionRequest request, final ActionListener listener) { + execute(ResumeIngestionAction.INSTANCE, request, listener); + } + + /** Get ingestion state */ + @Override + public ActionFuture getIngestionState(final GetIngestionStateRequest request) { + return execute(GetIngestionStateAction.INSTANCE, request); + } + + /** Get ingestion state */ + @Override + public void getIngestionState(final GetIngestionStateRequest request, final ActionListener listener) { + execute(GetIngestionStateAction.INSTANCE, request, listener); + } } @Override diff --git a/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/IngestionUpdateStateResponseTests.java 
b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/IngestionUpdateStateResponseTests.java new file mode 100644 index 0000000000000..dac321e681065 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/IngestionUpdateStateResponseTests.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class IngestionUpdateStateResponseTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + IngestionStateShardFailure[] shardFailures = new IngestionStateShardFailure[] { + new IngestionStateShardFailure("index1", 0, "test failure") }; + IngestionUpdateStateResponse response = new IngestionUpdateStateResponse(true, true, shardFailures, "test error"); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + response.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + IngestionUpdateStateResponse deserializedResponse = new IngestionUpdateStateResponse(in); + assertTrue(deserializedResponse.isAcknowledged()); + assertTrue(deserializedResponse.isShardsAcknowledged()); + assertNotNull(deserializedResponse.getShardFailures()); + assertEquals(1, deserializedResponse.getShardFailures().length); + assertEquals("index1", deserializedResponse.getShardFailures()[0].index()); + assertEquals(0, deserializedResponse.getShardFailures()[0].shard()); + assertEquals("test error", deserializedResponse.getErrorMessage()); + } + } + } + + public void testShardFailureGrouping() { + IngestionStateShardFailure[] shardFailures = new IngestionStateShardFailure[] { + new IngestionStateShardFailure("index1", 0, "failure 1"), + new IngestionStateShardFailure("index1", 1, "failure 2"), + new IngestionStateShardFailure("index2", 0, "failure 3") }; + IngestionUpdateStateResponse response = new IngestionUpdateStateResponse(true, true, shardFailures, "test error"); + + Map> groupedFailures = IngestionStateShardFailure.groupShardFailuresByIndex(shardFailures); + assertEquals(2, groupedFailures.size()); + assertEquals(2, groupedFailures.get("index1").size()); + assertEquals(1, groupedFailures.get("index2").size()); + assertEquals(0, groupedFailures.get("index1").get(0).shard()); + assertEquals(1, groupedFailures.get("index1").get(1).shard()); + assertEquals(0, groupedFailures.get("index2").get(0).shard()); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionRequestTests.java new file mode 100644 index 0000000000000..d1dbb3b442ff9 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/pause/PauseIngestionRequestTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.pause; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class PauseIngestionRequestTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + String[] indices = new String[] { "index1", "index2" }; + PauseIngestionRequest request = new PauseIngestionRequest(indices); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + PauseIngestionRequest deserializedRequest = new PauseIngestionRequest(in); + assertArrayEquals(request.indices(), deserializedRequest.indices()); + assertEquals(request.indicesOptions(), deserializedRequest.indicesOptions()); + } + } + } + + public void testValidation() { + // Test with valid indices + PauseIngestionRequest request = new PauseIngestionRequest(new String[] { "index1", "index2" }); + assertNull(request.validate()); + + // Test with empty indices + PauseIngestionRequest request2 = new PauseIngestionRequest(new String[0]); + ActionRequestValidationException e = request2.validate(); + assertNotNull(e); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionRequestTests.java new file mode 100644 index 0000000000000..9ce19ab1c10b8 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/resume/ResumeIngestionRequestTests.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.resume; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class ResumeIngestionRequestTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + String[] indices = new String[] { "index1", "index2" }; + ResumeIngestionRequest request = new ResumeIngestionRequest(indices); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + ResumeIngestionRequest deserializedRequest = new ResumeIngestionRequest(in); + assertArrayEquals(request.indices(), deserializedRequest.indices()); + assertEquals(request.indicesOptions(), deserializedRequest.indicesOptions()); + } + } + } + + public void testValidation() { + // Test with valid indices + ResumeIngestionRequest request1 = new ResumeIngestionRequest(new String[] { "index1", "index2" }); + assertNull(request1.validate()); + + // Test with empty indices + ResumeIngestionRequest request2 = new ResumeIngestionRequest(new String[0]); + ActionRequestValidationException e = request2.validate(); + assertNotNull(e); + } + + public void testResetSettingsSerialization() throws IOException { + ResumeIngestionRequest.ResetSettings settings = new ResumeIngestionRequest.ResetSettings(1, "mode", "value"); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + settings.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + ResumeIngestionRequest.ResetSettings deserialized = new ResumeIngestionRequest.ResetSettings(in); + assertEquals(settings.getShard(), deserialized.getShard()); + assertEquals(settings.getMode(), deserialized.getMode()); + assertEquals(settings.getValue(), deserialized.getValue()); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateRequestTests.java new file mode 100644 index 0000000000000..9e0c70c143a7b --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateRequestTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class GetIngestionStateRequestTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + String[] indices = new String[] { "index1", "index2" }; + int[] shards = new int[] { 0, 1, 2 }; + GetIngestionStateRequest request = new GetIngestionStateRequest(indices); + request.setShards(shards); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + GetIngestionStateRequest deserializedRequest = new GetIngestionStateRequest(in); + assertArrayEquals(request.indices(), deserializedRequest.indices()); + assertArrayEquals(request.getShards(), deserializedRequest.getShards()); + } + } + } + + public void testValidation() { + // Test with valid indices + GetIngestionStateRequest request1 = new GetIngestionStateRequest(new String[] { "index1", "index2" }); + assertNull(request1.validate()); + + // Test with null indices + GetIngestionStateRequest request2 = new GetIngestionStateRequest((String[]) null); + ActionRequestValidationException e = request2.validate(); + assertNotNull(e); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateResponseTests.java new file mode 100644 index 0000000000000..1bef9ed371c18 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/GetIngestionStateResponseTests.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class GetIngestionStateResponseTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + ShardIngestionState[] shardStates = new ShardIngestionState[] { + new ShardIngestionState("index1", 0, "POLLING", "DROP", false), + new ShardIngestionState("index1", 1, "PAUSED", "BLOCK", true) }; + GetIngestionStateResponse response = new GetIngestionStateResponse(shardStates, 2, 2, 0, null, Collections.emptyList()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + response.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + GetIngestionStateResponse deserializedResponse = new GetIngestionStateResponse(in); + assertEquals(response.getShardStates()[0].shardId(), deserializedResponse.getShardStates()[0].shardId()); + assertEquals(response.getShardStates()[1].shardId(), deserializedResponse.getShardStates()[1].shardId()); + assertEquals(response.getTotalShards(), deserializedResponse.getTotalShards()); + assertEquals(response.getSuccessfulShards(), deserializedResponse.getSuccessfulShards()); + assertEquals(response.getFailedShards(), deserializedResponse.getFailedShards()); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/ShardIngestionStateTests.java b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/ShardIngestionStateTests.java new file mode 100644 index 0000000000000..b0aff39aef8c1 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/ShardIngestionStateTests.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class ShardIngestionStateTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + ShardIngestionState state = new ShardIngestionState("index1", 0, "POLLING", "DROP", false); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + state.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + ShardIngestionState deserializedState = new ShardIngestionState(in); + assertEquals(state.index(), deserializedState.index()); + assertEquals(state.shardId(), deserializedState.shardId()); + assertEquals(state.pollerState(), deserializedState.pollerState()); + assertEquals(state.isPollerPaused(), deserializedState.isPollerPaused()); + } + } + } + + public void testSerializationWithNullValues() throws IOException { + ShardIngestionState state = new ShardIngestionState("index1", 0, null, null, false); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + state.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + ShardIngestionState deserializedState = new ShardIngestionState(in); + assertEquals(state.index(), deserializedState.index()); + assertEquals(state.shardId(), deserializedState.shardId()); + assertNull(deserializedState.pollerState()); + assertEquals(state.isPollerPaused(), deserializedState.isPollerPaused()); + } + } + } + + public void testGroupShardStateByIndex() { + ShardIngestionState[] states = new ShardIngestionState[] { + new ShardIngestionState("index1", 0, "POLLING", "DROP", true), + new ShardIngestionState("index1", 1, "PAUSED", "DROP", false), + new ShardIngestionState("index2", 0, "POLLING", "DROP", true) }; + + Map> groupedStates = ShardIngestionState.groupShardStateByIndex(states); + + assertEquals(2, groupedStates.size()); + assertEquals(2, groupedStates.get("index1").size()); + assertEquals(1, groupedStates.get("index2").size()); + + // Verify index1 shards + List indexStates1 = groupedStates.get("index1"); + assertEquals(0, indexStates1.get(0).shardId()); + assertEquals(1, indexStates1.get(1).shardId()); + + // Verify index2 shards + List indexStates2 = groupedStates.get("index2"); + assertEquals(0, indexStates2.get(0).shardId()); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportGetIngestionStateActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportGetIngestionStateActionTests.java new file mode 100644 index 0000000000000..ad1cf1aea6532 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportGetIngestionStateActionTests.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.Version; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardsIterator; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardNotFoundException; +import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.opensearch.transport.client.node.NodeClient; + +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportGetIngestionStateActionTests extends OpenSearchTestCase { + + private TestThreadPool threadPool; + private ClusterService clusterService; + private TransportService transportService; + private IndicesService indicesService; + private ActionFilters actionFilters; + private IndexNameExpressionResolver indexNameExpressionResolver; + private TransportGetIngestionStateAction action; + private NodeClient client; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getClass().getName()); + clusterService = mock(ClusterService.class); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); + indicesService = mock(IndicesService.class); + actionFilters = mock(ActionFilters.class); + indexNameExpressionResolver = mock(IndexNameExpressionResolver.class); + client = mock(NodeClient.class); + + action = new TransportGetIngestionStateAction( + clusterService, + transportService, + indicesService, + actionFilters, + indexNameExpressionResolver, + client + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 30, java.util.concurrent.TimeUnit.SECONDS); + } + + public void testShards() { + GetIngestionStateRequest request = new GetIngestionStateRequest(new String[] { "test-index" }); + request.setShards(new int[] { 0, 1 }); + ClusterState clusterState = mock(ClusterState.class); + ShardsIterator shardsIterator = mock(ShardsIterator.class); + when(clusterState.routingTable()).thenReturn(mock(org.opensearch.cluster.routing.RoutingTable.class)); + when(clusterState.routingTable().allShardsSatisfyingPredicate(any(), any())).thenReturn(shardsIterator); + + ShardsIterator result = action.shards(clusterState, request, new String[] { "test-index" }); + assertThat(result, equalTo(shardsIterator)); + } 
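    /*
     * For illustration only: a minimal sketch of how a caller might drive the pause/resume/get-state client
     * methods introduced earlier in this patch, using the Requests helpers added alongside them. The index
     * name "my-index" is a placeholder, and isAcknowledged() on the pause response is assumed to mirror
     * IngestionUpdateStateResponse; the other accessors follow the response tests above.
     *
     *     // pause ingestion on one index and wait for the acknowledgement
     *     PauseIngestionResponse paused = client.admin().indices()
     *         .pauseIngestion(Requests.pauseIngestionRequest("my-index"))
     *         .actionGet();
     *     boolean acknowledged = paused.isAcknowledged();
     *
     *     // inspect the per-shard poller state; PAUSED is expected once the pause has propagated
     *     GetIngestionStateResponse state = client.admin().indices()
     *         .getIngestionState(Requests.getIngestionStateRequest("my-index"))
     *         .actionGet();
     *     for (ShardIngestionState shard : state.getShardStates()) {
     *         logger.info("shard {} poller state {}", shard.shardId(), shard.pollerState());
     *     }
     *
     *     // resume ingestion when done
     *     client.admin().indices().resumeIngestion(Requests.resumeIngestionRequest("my-index")).actionGet();
     */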
+ + public void testCheckGlobalBlock() { + GetIngestionStateRequest request = new GetIngestionStateRequest(new String[] { "test-index" }); + ClusterState clusterState = mock(ClusterState.class); + when(clusterState.blocks()).thenReturn(mock(org.opensearch.cluster.block.ClusterBlocks.class)); + when(clusterState.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ)).thenReturn(null); + + ClusterBlockException result = action.checkGlobalBlock(clusterState, request); + assertThat(result, equalTo(null)); + } + + public void testCheckRequestBlock() { + GetIngestionStateRequest request = new GetIngestionStateRequest(new String[] { "test-index" }); + ClusterState clusterState = mock(ClusterState.class); + when(clusterState.blocks()).thenReturn(mock(org.opensearch.cluster.block.ClusterBlocks.class)); + when(clusterState.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, request.indices())).thenReturn(null); + + ClusterBlockException result = action.checkRequestBlock(clusterState, request, new String[] { "test-index" }); + assertThat(result, equalTo(null)); + } + + public void testShardOperation() { + GetIngestionStateRequest request = new GetIngestionStateRequest(new String[] { "test-index" }); + ShardRouting shardRouting = mock(ShardRouting.class); + IndexService indexService = mock(IndexService.class); + IndexShard indexShard = mock(IndexShard.class); + ShardIngestionState expectedState = new ShardIngestionState("test-index", 0, "POLLING", "DROP", true); + + when(shardRouting.shardId()).thenReturn(mock(ShardId.class)); + when(shardRouting.shardId().getIndex()).thenReturn(mock(Index.class)); + when(shardRouting.shardId().id()).thenReturn(0); + when(indicesService.indexServiceSafe(any())).thenReturn(indexService); + when(indexService.getShard(0)).thenReturn(indexShard); + when(indexShard.routingEntry()).thenReturn(mock(org.opensearch.cluster.routing.ShardRouting.class)); + when(indexShard.getIngestionState()).thenReturn(expectedState); + + ShardIngestionState result = action.shardOperation(request, shardRouting); + assertThat(result, equalTo(expectedState)); + } + + public void testShardOperationWithShardNotFoundException() { + GetIngestionStateRequest request = new GetIngestionStateRequest(new String[] { "test-index" }); + ShardRouting shardRouting = mock(ShardRouting.class); + IndexService indexService = mock(IndexService.class); + IndexShard indexShard = mock(IndexShard.class); + + when(shardRouting.shardId()).thenReturn(mock(ShardId.class)); + when(shardRouting.shardId().getIndex()).thenReturn(mock(Index.class)); + when(shardRouting.shardId().id()).thenReturn(0); + when(indicesService.indexServiceSafe(any())).thenReturn(indexService); + when(indexService.getShard(0)).thenReturn(indexShard); + when(indexShard.routingEntry()).thenReturn(null); + + expectThrows(ShardNotFoundException.class, () -> action.shardOperation(request, shardRouting)); + } + + public void testShardOperationWithAlreadyClosedException() { + GetIngestionStateRequest request = new GetIngestionStateRequest(new String[] { "test-index" }); + ShardRouting shardRouting = mock(ShardRouting.class); + IndexService indexService = mock(IndexService.class); + IndexShard indexShard = mock(IndexShard.class); + + when(shardRouting.shardId()).thenReturn(mock(ShardId.class)); + when(shardRouting.shardId().getIndex()).thenReturn(mock(Index.class)); + when(shardRouting.shardId().id()).thenReturn(0); + when(indicesService.indexServiceSafe(any())).thenReturn(indexService); + 
when(indexService.getShard(0)).thenReturn(indexShard); + when(indexShard.routingEntry()).thenReturn(mock(org.opensearch.cluster.routing.ShardRouting.class)); + when(indexShard.getIngestionState()).thenThrow(new AlreadyClosedException("shard is closed")); + + expectThrows(ShardNotFoundException.class, () -> action.shardOperation(request, shardRouting)); + } + + public void testNewResponse() { + GetIngestionStateRequest request = new GetIngestionStateRequest(new String[] { "test-index" }); + List responses = Collections.singletonList(new ShardIngestionState("test-index", 0, "POLLING", "DROP", true)); + List shardFailures = Collections.emptyList(); + ClusterState clusterState = mock(ClusterState.class); + + GetIngestionStateResponse response = action.newResponse(request, 1, 1, 0, responses, shardFailures, clusterState); + + assertThat(response.getTotalShards(), equalTo(1)); + assertThat(response.getSuccessfulShards(), equalTo(1)); + assertThat(response.getFailedShards(), equalTo(0)); + assertThat(response.getShardStates().length, equalTo(1)); + assertThat(response.getShardStates()[0].index(), equalTo("test-index")); + assertThat(response.getShardStates()[0].shardId(), equalTo(0)); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportUpdateIngestionStateActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportUpdateIngestionStateActionTests.java new file mode 100644 index 0000000000000..2307a71be7eda --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/TransportUpdateIngestionStateActionTests.java @@ -0,0 +1,175 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.Version; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardsIterator; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardNotFoundException; +import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportUpdateIngestionStateActionTests extends OpenSearchTestCase { + + private TestThreadPool threadPool; + private ClusterService clusterService; + private TransportService transportService; + private IndicesService indicesService; + private ActionFilters actionFilters; + private IndexNameExpressionResolver indexNameExpressionResolver; + private TransportUpdateIngestionStateAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getClass().getName()); + clusterService = mock(ClusterService.class); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); + indicesService = mock(IndicesService.class); + actionFilters = mock(ActionFilters.class); + indexNameExpressionResolver = mock(IndexNameExpressionResolver.class); + action = new TransportUpdateIngestionStateAction( + clusterService, + transportService, + indicesService, + actionFilters, + indexNameExpressionResolver + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 30, java.util.concurrent.TimeUnit.SECONDS); + } + + public void testShards() { + UpdateIngestionStateRequest request = new UpdateIngestionStateRequest(new String[] { "test-index" }, new int[] { 0, 1 }); + ClusterState clusterState = mock(ClusterState.class); + ShardsIterator shardsIterator = mock(ShardsIterator.class); + when(clusterState.routingTable()).thenReturn(mock(org.opensearch.cluster.routing.RoutingTable.class)); + when(clusterState.routingTable().allShardsSatisfyingPredicate(any(), any())).thenReturn(shardsIterator); + + ShardsIterator result = action.shards(clusterState, request, new String[] { "test-index" }); + assertThat(result, equalTo(shardsIterator)); + } + + public void testCheckGlobalBlock() { + UpdateIngestionStateRequest request = new UpdateIngestionStateRequest(new String[] { 
"test-index" }, new int[] {}); + ClusterState clusterState = mock(ClusterState.class); + when(clusterState.blocks()).thenReturn(mock(org.opensearch.cluster.block.ClusterBlocks.class)); + when(clusterState.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE)).thenReturn(null); + + ClusterBlockException result = action.checkGlobalBlock(clusterState, request); + assertThat(result, equalTo(null)); + } + + public void testCheckRequestBlock() { + UpdateIngestionStateRequest request = new UpdateIngestionStateRequest(new String[] { "test-index" }, new int[] {}); + ClusterState clusterState = mock(ClusterState.class); + when(clusterState.blocks()).thenReturn(mock(org.opensearch.cluster.block.ClusterBlocks.class)); + when(clusterState.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, request.indices())).thenReturn(null); + + ClusterBlockException result = action.checkRequestBlock(clusterState, request, new String[] { "test-index" }); + assertThat(result, equalTo(null)); + } + + public void testShardOperation() { + UpdateIngestionStateRequest request = new UpdateIngestionStateRequest(new String[] { "test-index" }, new int[] { 0 }); + request.setIngestionPaused(true); + ShardRouting shardRouting = mock(ShardRouting.class); + IndexService indexService = mock(IndexService.class); + IndexShard indexShard = mock(IndexShard.class); + ShardIngestionState expectedState = new ShardIngestionState("test-index", 0, "PAUSED", "DROP", true); + + when(shardRouting.shardId()).thenReturn(mock(ShardId.class)); + when(shardRouting.shardId().getIndex()).thenReturn(mock(Index.class)); + when(shardRouting.shardId().id()).thenReturn(0); + when(indicesService.indexServiceSafe(any())).thenReturn(indexService); + when(indexService.getShard(0)).thenReturn(indexShard); + when(indexShard.routingEntry()).thenReturn(mock(org.opensearch.cluster.routing.ShardRouting.class)); + when(indexShard.getIngestionState()).thenReturn(expectedState); + + ShardIngestionState result = action.shardOperation(request, shardRouting); + assertThat(result, equalTo(expectedState)); + } + + public void testShardOperationWithShardNotFoundException() { + UpdateIngestionStateRequest request = new UpdateIngestionStateRequest(new String[] { "test-index" }, new int[] { 0 }); + ShardRouting shardRouting = mock(ShardRouting.class); + IndexService indexService = mock(IndexService.class); + IndexShard indexShard = mock(IndexShard.class); + + when(shardRouting.shardId()).thenReturn(mock(ShardId.class)); + when(shardRouting.shardId().getIndex()).thenReturn(mock(Index.class)); + when(shardRouting.shardId().id()).thenReturn(0); + when(indicesService.indexServiceSafe(any())).thenReturn(indexService); + when(indexService.getShard(0)).thenReturn(indexShard); + when(indexShard.routingEntry()).thenReturn(null); + + expectThrows(ShardNotFoundException.class, () -> action.shardOperation(request, shardRouting)); + } + + public void testShardOperationWithAlreadyClosedException() { + UpdateIngestionStateRequest request = new UpdateIngestionStateRequest(new String[] { "test-index" }, new int[] { 0 }); + ShardRouting shardRouting = mock(ShardRouting.class); + IndexService indexService = mock(IndexService.class); + IndexShard indexShard = mock(IndexShard.class); + + when(shardRouting.shardId()).thenReturn(mock(ShardId.class)); + when(shardRouting.shardId().getIndex()).thenReturn(mock(Index.class)); + when(shardRouting.shardId().id()).thenReturn(0); + when(indicesService.indexServiceSafe(any())).thenReturn(indexService); + 
when(indexService.getShard(0)).thenReturn(indexShard); + when(indexShard.routingEntry()).thenReturn(mock(org.opensearch.cluster.routing.ShardRouting.class)); + when(indexShard.getIngestionState()).thenThrow(new AlreadyClosedException("shard is closed")); + + expectThrows(ShardNotFoundException.class, () -> action.shardOperation(request, shardRouting)); + } + + public void testNewResponse() { + UpdateIngestionStateRequest request = new UpdateIngestionStateRequest(new String[] { "test-index" }, new int[] { 0 }); + List responses = Collections.singletonList(new ShardIngestionState("test-index", 0, "PAUSED", "DROP", true)); + List shardFailures = Collections.emptyList(); + ClusterState clusterState = mock(ClusterState.class); + + UpdateIngestionStateResponse response = action.newResponse(request, 1, 1, 0, responses, shardFailures, clusterState); + + assertThat(response.isAcknowledged(), equalTo(true)); + assertThat(response.getTotalShards(), equalTo(1)); + assertThat(response.getSuccessfulShards(), equalTo(1)); + assertThat(response.getFailedShards(), equalTo(0)); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateRequestTests.java new file mode 100644 index 0000000000000..5903881fb3098 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateRequestTests.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class UpdateIngestionStateRequestTests extends OpenSearchTestCase { + + public void testConstructor() { + String[] indices = new String[] { "index1", "index2" }; + int[] shards = new int[] { 0, 1, 2 }; + UpdateIngestionStateRequest request = new UpdateIngestionStateRequest(indices, shards); + assertArrayEquals(indices, request.getIndex()); + assertArrayEquals(shards, request.getShards()); + assertNull(request.getIngestionPaused()); + } + + public void testSerialization() throws IOException { + String[] indices = new String[] { "index1", "index2" }; + int[] shards = new int[] { 0, 1, 2 }; + UpdateIngestionStateRequest request = new UpdateIngestionStateRequest(indices, shards); + request.setIngestionPaused(true); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + UpdateIngestionStateRequest deserializedRequest = new UpdateIngestionStateRequest(in); + assertArrayEquals(request.getIndex(), deserializedRequest.getIndex()); + assertArrayEquals(request.getShards(), deserializedRequest.getShards()); + assertTrue(deserializedRequest.getIngestionPaused()); + } + } + } + + public void testValidation() { + // Test with null indices + UpdateIngestionStateRequest request = new UpdateIngestionStateRequest(null, new int[] {}); + ActionRequestValidationException validationException = request.validate(); + assertNotNull(validationException); + + // 
Test with valid indices + request = new UpdateIngestionStateRequest(new String[] { "index1" }, new int[] {}); + validationException = request.validate(); + assertNull(validationException); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateResponseTests.java new file mode 100644 index 0000000000000..bfc67589fe2f4 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/streamingingestion/state/UpdateIngestionStateResponseTests.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.streamingingestion.state; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +public class UpdateIngestionStateResponseTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + List shardFailures = Collections.singletonList( + new DefaultShardOperationFailedException("index1", 0, new Exception("test failure")) + ); + UpdateIngestionStateResponse response = new UpdateIngestionStateResponse(true, 3, 2, 1, shardFailures); + response.setErrorMessage("test error"); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + response.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + UpdateIngestionStateResponse deserializedResponse = new UpdateIngestionStateResponse(in); + assertTrue(deserializedResponse.isAcknowledged()); + assertEquals(3, deserializedResponse.getTotalShards()); + assertEquals(2, deserializedResponse.getSuccessfulShards()); + assertEquals(1, deserializedResponse.getFailedShards()); + assertNotNull(deserializedResponse.getShardFailureList()); + assertEquals(1, deserializedResponse.getShardFailureList().length); + assertEquals("index1", deserializedResponse.getShardFailureList()[0].index()); + assertEquals(0, deserializedResponse.getShardFailureList()[0].shard()); + assertEquals("test error", deserializedResponse.getErrorMessage()); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java index 4599e3e8f154c..6d71a3763fbc9 100644 --- a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java +++ b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java @@ -69,7 +69,8 @@ public void setUp() throws Exception { processorRunnable, StreamPoller.ResetState.NONE, "", - errorStrategy + errorStrategy, + StreamPoller.State.NONE ); } @@ -125,7 +126,8 @@ public void testSkipProcessed() throws InterruptedException { processorRunnable, StreamPoller.ResetState.NONE, "", - errorStrategy + errorStrategy, + StreamPoller.State.NONE ); CountDownLatch latch = new CountDownLatch(2); @@ -162,7 +164,8 @@ public void testResetStateEarliest() throws InterruptedException { processorRunnable, StreamPoller.ResetState.EARLIEST, "", - errorStrategy + 
errorStrategy, + StreamPoller.State.NONE ); CountDownLatch latch = new CountDownLatch(2); doAnswer(invocation -> { @@ -185,7 +188,8 @@ public void testResetStateLatest() throws InterruptedException { processorRunnable, StreamPoller.ResetState.LATEST, "", - errorStrategy + errorStrategy, + StreamPoller.State.NONE ); poller.start(); @@ -204,7 +208,8 @@ public void testResetStateRewindByOffset() throws InterruptedException { processorRunnable, StreamPoller.ResetState.REWIND_BY_OFFSET, "1", - errorStrategy + errorStrategy, + StreamPoller.State.NONE ); CountDownLatch latch = new CountDownLatch(1); doAnswer(invocation -> { @@ -276,7 +281,8 @@ public void testDropErrorIngestionStrategy() throws TimeoutException, Interrupte processorRunnable, StreamPoller.ResetState.NONE, "", - errorStrategy + errorStrategy, + StreamPoller.State.NONE ); poller.start(); Thread.sleep(sleepTime); @@ -321,7 +327,8 @@ public void testBlockErrorIngestionStrategy() throws TimeoutException, Interrupt processorRunnable, StreamPoller.ResetState.NONE, "", - errorStrategy + errorStrategy, + StreamPoller.State.NONE ); poller.start(); Thread.sleep(sleepTime); @@ -347,7 +354,8 @@ public void testProcessingErrorWithBlockErrorIngestionStrategy() throws TimeoutE processorRunnable, StreamPoller.ResetState.NONE, "", - mockErrorStrategy + mockErrorStrategy, + StreamPoller.State.NONE ); poller.start(); Thread.sleep(sleepTime); From 8182bb000d9179bf47e14e3f7ce0c885a1a19316 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Mar 2025 14:55:51 -0500 Subject: [PATCH 123/550] Bump com.netflix.nebula.ospackage-base from 11.11.1 to 11.11.2 in /distribution/packages (#17734) * Bump com.netflix.nebula.ospackage-base in /distribution/packages Bumps com.netflix.nebula.ospackage-base from 11.11.1 to 11.11.2. --- updated-dependencies: - dependency-name: com.netflix.nebula.ospackage-base dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + distribution/packages/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7d0cd93d7005..448e5361e3fc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17668](https://github.com/opensearch-project/OpenSearch/pull/17668)) - Bump `com.github.luben:zstd-jni` from 1.5.5-1 to 1.5.6-1 ([#17674](https://github.com/opensearch-project/OpenSearch/pull/17674)) - Bump `lycheeverse/lychee-action` from 2.3.0 to 2.4.0 ([#17731](https://github.com/opensearch-project/OpenSearch/pull/17731)) +- Bump `com.netflix.nebula.ospackage-base` from 11.11.1 to 11.11.2 ([#17734](https://github.com/opensearch-project/OpenSearch/pull/17734)) ### Changed diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index d3cecde24a35d..a980c4d415045 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.11.1" + id "com.netflix.nebula.ospackage-base" version "11.11.2" } void addProcessFilesTask(String type, boolean jdk) { From 0eabc79630f253a1f38b3be7d3cb55864571d483 Mon Sep 17 00:00:00 2001 From: Vinay Krishna Pudyodu Date: Mon, 31 Mar 2025 14:53:12 -0700 Subject: [PATCH 124/550] Support Allocation Awareness for Search replicas (#17652) * Added allocation awareness attribute support for search replica Signed-off-by: Vinay Krishna Pudyodu * Support allocation awareness balance for search replica Signed-off-by: Vinay Krishna Pudyodu * Updated awareness balance condition Signed-off-by: Vinay Krishna Pudyodu * Address PR comments Signed-off-by: Vinay Krishna Pudyodu * updated the if condition check Signed-off-by: Vinay Krishna Pudyodu * changes based on the PR comments, reducing diffs Signed-off-by: Vinay Krishna Pudyodu * reducing the diffs Signed-off-by: Vinay Krishna Pudyodu * removed unnecessary static keyword Signed-off-by: Vinay Krishna Pudyodu --------- Signed-off-by: Vinay Krishna Pudyodu --- .../allocation/AwarenessAllocationIT.java | 29 +- .../SearchReplicaAwarenessAllocationIT.java | 326 ++++++++++++++++++ .../metadata/MetadataCreateIndexService.java | 10 +- .../MetadataUpdateSettingsService.java | 13 +- .../cluster/routing/RoutingNodes.java | 23 +- .../allocation/AwarenessReplicaBalance.java | 10 + .../decider/AwarenessAllocationDecider.java | 34 +- .../allocation/AwarenessAllocationTests.java | 58 +++- .../AwarenessReplicaBalanceTests.java | 14 + ...SearchReplicaAwarenessAllocationTests.java | 267 ++++++++++++++ .../cluster/OpenSearchAllocationTestCase.java | 4 + 11 files changed, 768 insertions(+), 20 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAwarenessAllocationIT.java create mode 100644 server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAwarenessAllocationTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java index 
522d63b22a0da..f947ca1ccbb85 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java @@ -364,7 +364,6 @@ public void testAwarenessZonesIncrementalNodes() { assertThat(counts.get(noZoneNode), equalTo(2)); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/5908") public void testThreeZoneOneReplicaWithForceZoneValueAndLoadAwareness() throws Exception { int nodeCountPerAZ = 5; int numOfShards = 30; @@ -504,4 +503,32 @@ public void testThreeZoneOneReplicaWithForceZoneValueAndLoadAwareness() throws E assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2 * numOfShards * (numOfReplica + 1))); assertThat(health.isTimedOut(), equalTo(false)); } + + public void testAwarenessBalanceWithForcedAwarenessCreateAndUpdateIndex() { + Settings settings = Settings.builder() + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.balance", "true") + .build(); + + logger.info("--> starting 3 nodes on zones a,b,c"); + internalCluster().startNodes( + Settings.builder().put(settings).put("node.attr.zone", "a").build(), + Settings.builder().put(settings).put("node.attr.zone", "b").build(), + Settings.builder().put(settings).put("node.attr.zone", "c").build() + ); + + // Create index with 2 replicas ie total 3 shards + createIndex( + "test-idx", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2).build() + ); + + // Update the number of replicas to 4 + final Settings newsettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 4).build(); + + assertThrows(IllegalArgumentException.class, () -> { + assertAcked(client().admin().indices().prepareUpdateSettings("test-idx").setSettings(newsettings)); + }); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAwarenessAllocationIT.java new file mode 100644 index 0000000000000..3e610df1887ed --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAwarenessAllocationIT.java @@ -0,0 +1,326 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.allocation; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; +import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.test.NodeRoles.searchOnlyNode; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SearchReplicaAwarenessAllocationIT extends RemoteStoreBaseIntegTestCase { + + private final Logger logger = LogManager.getLogger(SearchReplicaAwarenessAllocationIT.class); + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); + } + + public void testAllocationAwarenessZones() { + Settings commonSettings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .build(); + + logger.info("--> starting 8 nodes on different zones"); + List nodes = internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "a").put(searchOnlyNode()).build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").put(searchOnlyNode()).build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").put(searchOnlyNode()).build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "a").put(searchOnlyNode()).build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("6").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + logger.info("--> create index"); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 2) + .put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build() + ); + + logger.info("--> waiting for shards to be allocated"); + ensureGreen("test"); + + ClusterState clusterState = 
client().admin().cluster().prepareState().execute().actionGet().getState(); + final Map counts = new HashMap<>(); + + for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : indexShardRoutingTable) { + counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum); + } + } + } + + /* + * Ensures that shards are distributed across different zones in the cluster. + * Given two zones (a and b) with one data node in each, the shards are evenly distributed, + * resulting in each data node being assigned three shards. + */ + for (int i = 0; i < 2; i++) { + assertThat(counts.get(nodes.get(i)), equalTo(3)); + } + + /* + * There are two search nodes in each zone, totaling four search nodes. + * With six search shards to allocate, they are assigned using a best-effort spread, + * ensuring each search node receives either one or two shards. + */ + for (int i = 2; i < 6; i++) { + assertThat(counts.get(nodes.get(i)), anyOf(equalTo(1), equalTo(2))); + } + } + + public void testAwarenessZonesIncrementalNodes() { + Settings commonSettings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .build(); + + logger.info("--> starting 2 nodes on zones 'a' & 'b'"); + List nodes = internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "a").put(searchOnlyNode()).build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").put(searchOnlyNode()).build() + ); + + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 2) + .build() + ); + + ensureGreen("test"); + + ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + Map counts = new HashMap<>(); + + for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : indexShardRoutingTable) { + counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum); + } + } + } + + /* + * The cluster consists of two zones, each containing one data node and one search node. + * Replicas and search replicas are evenly distributed across these zones. 
+ */ + for (int i = 0; i < 4; i++) { + assertThat(counts.get(nodes.get(i)), equalTo(3)); + } + + logger.info("--> starting another data and search node in zone 'b'"); + + String B_2 = internalCluster().startNode(Settings.builder().put(commonSettings).put("node.attr.zone", "b").build()); + String B_3 = internalCluster().startNode( + Settings.builder().put(commonSettings).put("node.attr.zone", "b").put(searchOnlyNode()).build() + ); + + ensureGreen("test"); + + client().admin().cluster().prepareReroute().get(); + + ensureGreen("test"); + + clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + + counts = new HashMap<>(); + + for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : indexShardRoutingTable) { + counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum); + } + } + } + + /* + * Adding a new data node and a new search node in zone B results in: + * - Zone A: 1 data node, 1 search node + * - Zone B: 2 data nodes, 2 search nodes + * + * As a result, shards are rerouted to maintain a best-effort balanced allocation. + */ + assertThat(counts.get(nodes.get(0)), equalTo(3)); + assertThat(counts.get(nodes.get(1)), equalTo(2)); + assertThat(counts.get(nodes.get(2)), equalTo(3)); + assertThat(counts.get(nodes.get(3)), equalTo(2)); + assertThat(counts.get(B_2), equalTo(1)); + assertThat(counts.get(B_3), equalTo(1)); + + logger.info("--> starting another data node without any zone"); + + String noZoneNode = internalCluster().startNode(); + ensureGreen("test"); + client().admin().cluster().prepareReroute().get(); + ensureGreen("test"); + clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + + counts = new HashMap<>(); + + for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : indexShardRoutingTable) { + counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum); + } + } + } + + logger.info("--> Ensure there was not rerouting"); + + /* + * Adding another node to the cluster without a zone attribute + * does not trigger shard reallocation; existing shard assignments remain unchanged. 
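+ * The awareness decider only places copies on nodes that carry the configured "zone"
+ * attribute (a node without it gets a NO decision, "node does not contain the awareness
+ * attribute [zone]"), so the zone-less node is expected to hold no shards at all.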
+ */ + assertThat(counts.get(nodes.get(0)), equalTo(3)); + assertThat(counts.get(nodes.get(1)), equalTo(2)); + assertThat(counts.get(nodes.get(2)), equalTo(3)); + assertThat(counts.get(nodes.get(3)), equalTo(2)); + assertThat(counts.get(B_2), equalTo(1)); + assertThat(counts.get(B_3), equalTo(1)); + assertThat(counts.containsKey(noZoneNode), equalTo(false)); + + logger.info("--> Remove the awareness attribute setting"); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "") + .build() + ) + .get(); + + ensureGreen("test"); + clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + + counts = new HashMap<>(); + + for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : indexShardRoutingTable) { + counts.merge(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1, Integer::sum); + } + } + } + + /* + * Removing allocation awareness attributes from the cluster disables zone-based distribution. + * Shards are then assigned based solely on the other deciders in the cluster manager. + */ + assertThat(counts.get(nodes.get(0)), equalTo(2)); + assertThat(counts.get(nodes.get(1)), equalTo(2)); + assertThat(counts.get(nodes.get(2)), equalTo(2)); + assertThat(counts.get(nodes.get(3)), equalTo(2)); + assertThat(counts.get(B_2), equalTo(1)); + assertThat(counts.get(B_3), equalTo(2)); + assertThat(counts.get(noZoneNode), equalTo(1)); + } + + public void testAwarenessBalanceWithForcedAwarenessCreateIndex() { + Settings settings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b,c") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), "true") + .build(); + + logger.info("--> starting 3 nodes on zones a,b,c"); + internalCluster().startNodes( + Settings.builder().put(settings).put("node.attr.zone", "a").build(), + Settings.builder().put(settings).put("node.attr.zone", "b").build(), + Settings.builder().put(settings).put("node.attr.zone", "c").build() + ); + + // Create index with 2 replicas and 2 search replicas + assertThrows(IllegalArgumentException.class, () -> { + createIndex( + "test-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 2) + .build() + ); + }); + } + + public void testAwarenessBalanceWithForcedAwarenessUpdateIndex() { + Settings settings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b,c") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), "true") + .build(); + + logger.info("--> starting 3 nodes on zones a,b,c"); + internalCluster().startNodes( + Settings.builder().put(settings).put("node.attr.zone", "a").build(), + Settings.builder().put(settings).put("node.attr.zone", "b").build(), + Settings.builder().put(settings).put("node.attr.zone", "c").build() + ); +
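+ // With forced zone values a, b and c, maxAwarenessAttributes() is 3, so the new
+ // AwarenessReplicaBalance#validate(int) check accepts 3 search replicas (3 % 3 == 0) and
+ // rejects the later update to 4 with "total search replicas needs to be a multiple of
+ // total awareness attributes [3]".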
+ // Create index with 2 replicas and 3 search replicas + createIndex( + "test-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 3) + .build() + ); + + // Update the number of search replicas to 4 + assertThrows(IllegalArgumentException.class, () -> { + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("test-idx") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 4).build()) + ); + }); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 2bdd31b23aee3..3483c14df6272 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1500,11 +1500,13 @@ List getIndexSettingsValidationErrors( IndexMetadata.SETTING_NUMBER_OF_REPLICAS, DEFAULT_REPLICA_COUNT_SETTING.get(this.clusterService.state().metadata().settings()) ); + int searchReplicaCount = settings.getAsInt(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0); AutoExpandReplicas autoExpandReplica = AutoExpandReplicas.SETTING.get(settings); - Optional error = awarenessReplicaBalance.validate(replicaCount, autoExpandReplica); - if (error.isPresent()) { - validationErrors.add(error.get()); - } + + Optional replicaValidationError = awarenessReplicaBalance.validate(replicaCount, autoExpandReplica); + replicaValidationError.ifPresent(validationErrors::add); + Optional searchReplicaValidationError = awarenessReplicaBalance.validate(searchReplicaCount); + searchReplicaValidationError.ifPresent(validationErrors::add); } return validationErrors; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index 469bec7220721..fff704210ca7a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -301,7 +301,18 @@ public ClusterState execute(ClusterState currentState) { } final int updatedNumberOfSearchReplicas = IndexMetadata.INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(openSettings); if (preserveExisting == false) { - // TODO: Honor awareness validation to search replicas. + for (Index index : request.indices()) { + if (index.getName().charAt(0) != '.') { + // No replica count validation for system indices + Optional error = awarenessReplicaBalance.validate(updatedNumberOfSearchReplicas); + + if (error.isPresent()) { + ValidationException ex = new ValidationException(); + ex.addValidationError(error.get()); + throw ex; + } + } + } // Verify that this won't take us over the cluster shard limit. 
int totalNewShards = Arrays.stream(request.indices()) diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index 76111f623e0a5..6db70cc5f4fc5 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -59,7 +59,6 @@ import java.util.ListIterator; import java.util.Map; import java.util.NoSuchElementException; -import java.util.Objects; import java.util.Queue; import java.util.Set; import java.util.function.Function; @@ -103,6 +102,7 @@ public class RoutingNodes implements Iterable { private int relocatingShards = 0; private final Map> nodesPerAttributeNames; + private final Map> searchNodesPerAttributeNames; private final Map recoveriesPerNode = new HashMap<>(); private final Map initialReplicaRecoveries = new HashMap<>(); private final Map initialPrimaryRecoveries = new HashMap<>(); @@ -116,6 +116,7 @@ public RoutingNodes(ClusterState clusterState, boolean readOnly) { this.readOnly = readOnly; final RoutingTable routingTable = clusterState.routingTable(); this.nodesPerAttributeNames = Collections.synchronizedMap(new HashMap<>()); + this.searchNodesPerAttributeNames = Collections.synchronizedMap(new HashMap<>()); // fill in the nodeToShards with the "live" nodes for (final DiscoveryNode cursor : clusterState.nodes().getDataNodes().values()) { @@ -297,10 +298,28 @@ public Stream stream() { return nodesToShards.values().stream(); } + /** + * Retrieves all unique values for a specific awareness attribute across all nodes + * Eg: "zone" : ["zone1", "zone2", "zone3"] + * @param attributeName The name of the awareness attribute to collect values for + * @return A set of unique attribute values for the specified attribute + */ public Set nodesPerAttributesCounts(String attributeName) { + return nodesPerAttributesCounts(attributeName, routingNode -> true); + } + + /** + * Retrieves all unique values for a specific awareness attribute across filtered nodes + * Eg: "zone" : ["zone1", "zone2", "zone3"] + * @param attributeName The name of the awareness attribute to collect values for + * @param routingNodeFilter filters the routing nodes based on given condition + * @return A set of unique attribute values for the specified attribute + */ + public Set nodesPerAttributesCounts(String attributeName, Predicate routingNodeFilter) { + return nodesPerAttributeNames.computeIfAbsent( attributeName, - ignored -> stream().map(r -> r.node().getAttributes().get(attributeName)).filter(Objects::nonNull).collect(Collectors.toSet()) + ignored -> stream().filter(routingNodeFilter).map(r -> r.node().getAttributes().get(attributeName)).collect(Collectors.toSet()) ); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java index 6fc0e535ef4dc..d2cf30bd31983 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java @@ -122,4 +122,14 @@ public Optional validate(int replicaCount, AutoExpandReplicas autoExpand return Optional.empty(); } + public Optional validate(int searchReplicaCount) { + // TODO: For now Search replicas do not support auto expand, when we add support update this validation + if (searchReplicaCount > 0 && 
searchReplicaCount % maxAwarenessAttributes() != 0) { + String errorMessage = "total search replicas needs to be a multiple of total awareness attributes [" + + maxAwarenessAttributes() + + "]"; + return Optional.of(errorMessage); + } + return Optional.empty(); + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 16c94acfbb553..17b8aa1d3cbb5 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -48,6 +48,7 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -160,7 +161,10 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout } IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); - int shardCount = indexMetadata.getNumberOfReplicas() + 1; // 1 for primary + int shardCount = shardRouting.isSearchOnly() + ? indexMetadata.getNumberOfSearchOnlyReplicas() + : indexMetadata.getNumberOfReplicas() + 1; // 1 for primary + for (String awarenessAttribute : awarenessAttributes) { // the node the shard exists on must be associated with an awareness attribute. if (isAwarenessAttributeAssociatedWithNode(node, awarenessAttribute) == false) { @@ -175,18 +179,14 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout } int currentNodeCount = getCurrentNodeCountForAttribute(shardRouting, node, allocation, moveToNode, awarenessAttribute); + Set attributeValues = getAttributeValues(shardRouting, allocation, awarenessAttribute); + int numberOfAttributes = attributeValues.size(); - // build attr_value -> nodes map - Set nodesPerAttribute = allocation.routingNodes().nodesPerAttributesCounts(awarenessAttribute); - int numberOfAttributes = nodesPerAttribute.size(); List fullValues = forcedAwarenessAttributes.get(awarenessAttribute); - if (fullValues != null) { // If forced awareness is enabled, numberOfAttributes = count(distinct((union(discovered_attributes, forced_attributes))) Set attributesSet = new HashSet<>(fullValues); - for (String stringObjectCursor : nodesPerAttribute) { - attributesSet.add(stringObjectCursor); - } + attributesSet.addAll(attributeValues); numberOfAttributes = attributesSet.size(); } @@ -211,6 +211,11 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout return allocation.decision(Decision.YES, NAME, "node meets all awareness attribute requirements"); } + private Set getAttributeValues(ShardRouting shardRouting, RoutingAllocation allocation, String awarenessAttribute) { + return allocation.routingNodes() + .nodesPerAttributesCounts(awarenessAttribute, routingNode -> routingNode.node().isSearchNode() == shardRouting.isSearchOnly()); + } + private int getCurrentNodeCountForAttribute( ShardRouting shardRouting, RoutingNode node, @@ -220,9 +225,9 @@ private int getCurrentNodeCountForAttribute( ) { // build the count of shards per attribute value final String shardAttributeForNode = getAttributeValueForNode(node, awarenessAttribute); + // Get all assigned shards of the same type + List assignedShards = getAssignedShards(allocation, shardRouting); int currentNodeCount = 0; - final List assignedShards = 
allocation.routingNodes().assignedShards(shardRouting.shardId()); - for (ShardRouting assignedShard : assignedShards) { if (assignedShard.started() || assignedShard.initializing()) { // Note: this also counts relocation targets as that will be the new location of the shard. @@ -255,6 +260,14 @@ private int getCurrentNodeCountForAttribute( return currentNodeCount; } + private List getAssignedShards(RoutingAllocation allocation, ShardRouting shardRouting) { + return allocation.routingNodes() + .assignedShards(shardRouting.shardId()) + .stream() + .filter(s -> s.isSearchOnly() == shardRouting.isSearchOnly()) + .collect(Collectors.toList()); + } + private boolean isAwarenessAttributeAssociatedWithNode(RoutingNode node, String awarenessAttribute) { return node.node().getAttributes().containsKey(awarenessAttribute); } @@ -262,5 +275,4 @@ private boolean isAwarenessAttributeAssociatedWithNode(RoutingNode node, String private String getAttributeValueForNode(final RoutingNode node, final String awarenessAttribute) { return node.node().getAttributes().get(awarenessAttribute); } - } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java index 02966b835fae0..d954e4675aa9a 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -67,7 +67,6 @@ import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; -import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.sameInstance; @@ -1063,4 +1062,61 @@ public void testAllocationExplainForUnassignedShardsWithUnbalancedZones() { } } + + public void testAllocationAwarenessWhenNotEnabled() { + AllocationService strategy = createAllocationService(Settings.builder().build()); + + logger.info("--> Building initial routing table"); + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .build(); + + logger.info("--> adding two nodes on same zone and do rerouting"); + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder().add(newNode("node1", singletonMap("zone", "a"))).add(newNode("node2", singletonMap("zone", "a"))) + ) + .build(); + clusterState = strategy.reroute(clusterState, "reroute"); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); + + logger.info("--> start the shards (primaries)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + logger.info("--> start the shards (replicas)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); + + logger.info("--> add a a nodes 
without zone and reroute"); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); + + logger.info("--> try to move the replica to new node"); + AllocationService.CommandsResult commandsResult = strategy.reroute( + clusterState, + new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")), + true, + false + ); + + assertEquals(commandsResult.explanations().explanations().size(), 1); + assertEquals(commandsResult.explanations().explanations().get(0).decisions().type(), Decision.Type.YES); + List decisions = commandsResult.explanations() + .explanations() + .get(0) + .decisions() + .getDecisions() + .stream() + .filter(item -> item.getExplanation().startsWith("allocation awareness is not enabled")) + .toList(); + assertEquals( + "allocation awareness is not enabled, set cluster setting " + "[cluster.routing.allocation.awareness.attributes] to enable it", + decisions.get(0).getExplanation() + ); + } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java index 019db47e74cc3..c6134330727aa 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java @@ -37,6 +37,9 @@ public void testNoForcedAwarenessAttribute() { assertEquals(awarenessReplicaBalance.validate(0, autoExpandReplica), Optional.empty()); assertEquals(awarenessReplicaBalance.validate(1, autoExpandReplica), Optional.empty()); + + assertEquals(awarenessReplicaBalance.validate(0), Optional.empty()); + assertEquals(awarenessReplicaBalance.validate(1), Optional.empty()); } public void testForcedAwarenessAttribute() { @@ -68,6 +71,7 @@ public void testForcedAwarenessAttribute() { awarenessReplicaBalance = new AwarenessReplicaBalance(settings, EMPTY_CLUSTER_SETTINGS); autoExpandReplica = AutoExpandReplicas.SETTING.get(settings); + assertThat(awarenessReplicaBalance.maxAwarenessAttributes(), equalTo(3)); assertEquals(awarenessReplicaBalance.validate(2, autoExpandReplica), Optional.empty()); assertEquals(awarenessReplicaBalance.validate(1, autoExpandReplica), Optional.empty()); assertEquals(awarenessReplicaBalance.validate(0, autoExpandReplica), Optional.empty()); @@ -114,6 +118,16 @@ public void testForcedAwarenessAttribute() { Optional.of("expected total copies needs to be a multiple of total awareness attributes [3]") ); + assertEquals(awarenessReplicaBalance.validate(3), Optional.empty()); + assertEquals(awarenessReplicaBalance.validate(0), Optional.empty()); + assertEquals( + awarenessReplicaBalance.validate(2), + Optional.of("total search replicas needs to be a multiple of total awareness attributes [3]") + ); + assertEquals( + awarenessReplicaBalance.validate(1), + Optional.of("total search replicas needs to be a multiple of total awareness attributes [3]") + ); } public void testForcedAwarenessAttributeDisabled() { diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAwarenessAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAwarenessAllocationTests.java new file mode 100644 index 0000000000000..b757d5911d204 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAwarenessAllocationTests.java @@ -0,0 +1,267 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation.decider; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.command.AllocationCommands; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; + +import java.util.Comparator; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; +import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.sameInstance; + +public class SearchReplicaAwarenessAllocationTests extends OpenSearchAllocationTestCase { + + private final Logger logger = LogManager.getLogger(SearchReplicaAwarenessAllocationTests.class); + + public void testAllocationAwarenessForIndexWithSearchReplica() { + AllocationService strategy = createAllocationService( + Settings.builder() + .put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.getKey(), true) + .put("cluster.routing.allocation.awareness.attributes", "zone") + .build() + ); + + logger.info("--> Building initial routing table"); + + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1) + .numberOfSearchReplicas(2) + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .build(); + + logger.info("--> adding four nodes on same zone and do rerouting"); + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder() + .add(newNode("node1", Map.of("zone", "a"))) + .add(newNode("node2", Map.of("zone", "a"))) + .add(newSearchNode("node3", Map.of("zone", "a"))) + .add(newSearchNode("node4", Map.of("zone", "a"))) + ) + .build(); + clusterState = strategy.reroute(clusterState, "reroute"); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); + + logger.info("--> start the shards (primaries)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + logger.info("--> start the shards (replicas)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + 
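+ // At this point all copies sit in zone "a": the primary and its replica on the two data
+ // nodes and the two search replicas on the two search nodes, giving the 4 STARTED shards
+ // asserted below.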
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); + + logger.info("--> add two nodes with a new zone and reroute"); + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder(clusterState.nodes()) + .add(newNode("node5", Map.of("zone", "b"))) + .add(newSearchNode("node6", Map.of("zone", "b"))) + ) + .build(); + clusterState = strategy.reroute(clusterState, "reroute"); + + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(2)); + + List<ShardRouting> shardRoutings = clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING); + shardRoutings.sort(Comparator.comparing(ShardRouting::currentNodeId)); + assertThat(shardRoutings.get(0).relocatingNodeId(), equalTo("node5")); + assertThat(shardRoutings.get(1).relocatingNodeId(), equalTo("node6")); + + logger.info("--> complete relocation"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); + + logger.info("--> do another reroute, make sure nothing moves"); + assertThat(strategy.reroute(clusterState, "reroute").routingTable(), sameInstance(clusterState.routingTable())); + + logger.info("--> add a new node with a new zone and reroute"); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node7", Map.of("zone", "c")))) + .build(); + + ClusterState newState = strategy.reroute(clusterState, "reroute"); + assertThat(newState, equalTo(clusterState)); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); + } + + public void testMoveShardOnceNewNodeWithOutAwarenessAttributeAdded() { + AllocationService strategy = createAllocationService( + Settings.builder() + .put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.getKey(), true) + .put("cluster.routing.allocation.awareness.attributes", "zone") + .build() + ); + + logger.info("--> Building initial routing table"); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .numberOfSearchReplicas(1) + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .build(); + + logger.info("--> adding two nodes on same zone and do rerouting"); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1", Map.of("zone", "a"))).add(newSearchNode("node2", Map.of("zone", "a")))) + .build(); + clusterState = strategy.reroute(clusterState, "reroute"); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); + + logger.info("--> start the shards (primaries)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + logger.info("--> start the shards (replicas)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); + + logger.info("--> add a search node without zone and reroute"); + clusterState =
ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newSearchNode("node3", Map.of("searchonly", "true")))) + .build(); + + logger.info("--> do another reroute, make sure nothing moves"); + assertThat(strategy.reroute(clusterState, "reroute").routingTable(), sameInstance(clusterState.routingTable())); + + logger.info("--> try to move the replica to node without zone attribute"); + AllocationService.CommandsResult commandsResult = strategy.reroute( + clusterState, + new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")), + true, + false + ); + + assertEquals(commandsResult.explanations().explanations().size(), 1); + assertEquals(commandsResult.explanations().explanations().get(0).decisions().type(), Decision.Type.NO); + List<Decision> decisions = commandsResult.explanations() + .explanations() + .get(0) + .decisions() + .getDecisions() + .stream() + .filter(item -> item.type() == Decision.Type.NO) + .toList(); + assertEquals( + "node does not contain the awareness attribute [zone]; " + + "required attributes cluster setting [cluster.routing.allocation.awareness.attributes=zone]", + decisions.get(0).getExplanation() + ); + } + + public void testFullAwarenessWithSearchReplica() { + AllocationService strategy = createAllocationService( + Settings.builder() + .put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.getKey(), true) + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b") + .build() + ); + + logger.info("Building initial routing table"); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .numberOfSearchReplicas(2) + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .build(); + + logger.info("--> adding two nodes on same zone and do rerouting"); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1", Map.of("zone", "a"))).add(newSearchNode("node2", Map.of("zone", "a")))) + .build(); + + clusterState = strategy.reroute(clusterState, "reroute"); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); + + logger.info("--> start the shards (primaries)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + logger.info("--> start the shards (replicas)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + logger.info("--> one search replica will not start because we have only one zone value"); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); + assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(1)); + + logger.info("--> add a new node with a new zone and reroute"); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newSearchNode("node3", Map.of("zone", "2")))) + .build(); + clusterState = strategy.reroute(clusterState, "reroute"); + +
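+ // Once a second zone value is visible to forced awareness, the previously unassigned
+ // search replica can be allocated: the two started copies stay put and the remaining
+ // search replica starts initializing on the newly added search node, as asserted below.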
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2)); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3")); + + logger.info("--> complete relocation"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3)); + + logger.info("--> do another reroute, make sure nothing moves"); + assertThat(strategy.reroute(clusterState, "reroute").routingTable(), sameInstance(clusterState.routingTable())); + + logger.info("--> add another node with a new zone, make sure nothing moves"); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4", singletonMap("zone", "c")))) + .build(); + ClusterState newState = strategy.reroute(clusterState, "reroute"); + assertThat(newState, equalTo(clusterState)); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(3)); + } +} diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java index 102a0e5aa2e6d..f0e4502787b28 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java @@ -170,6 +170,10 @@ protected static DiscoveryNode newSearchNode(String nodeId, Version version) { return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), SEARCH_ROLE, version); } + protected static DiscoveryNode newSearchNode(String nodeId, Map attributes) { + return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), attributes, SEARCH_ROLE, Version.CURRENT); + } + protected static DiscoveryNode newNode(String nodeName, String nodeId, Map attributes) { return new DiscoveryNode(nodeName, nodeId, buildNewFakeTransportAddress(), attributes, CLUSTER_MANAGER_DATA_ROLES, Version.CURRENT); } From 15d27a1aef282d74cb024935f9544c554c3e9b3c Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 1 Apr 2025 08:25:38 -0400 Subject: [PATCH 125/550] Enhance Java Agent to intercept System::exit (#17746) * Enhance Java Agent to intercept System::exit Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko --------- Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + gradle/libs.versions.toml | 2 +- libs/agent-sm/agent/build.gradle | 14 ++++++- .../agent/licenses/byte-buddy-1.17.4.jar.sha1 | 1 - .../agent/licenses/byte-buddy-1.17.5.jar.sha1 | 1 + .../java/org/opensearch/javaagent/Agent.java | 8 +++- .../javaagent/SystemExitInterceptor.java | 40 +++++++++++++++++++ .../javaagent/SystemExitInterceptorTests.java | 30 ++++++++++++++ .../javaagent/bootstrap/AgentPolicy.java | 17 +++++++- .../licenses/byte-buddy-1.17.4.jar.sha1 | 1 - .../licenses/byte-buddy-1.17.5.jar.sha1 | 1 + 11 files changed, 109 insertions(+), 7 deletions(-) delete mode 100644 libs/agent-sm/agent/licenses/byte-buddy-1.17.4.jar.sha1 create mode 100644 libs/agent-sm/agent/licenses/byte-buddy-1.17.5.jar.sha1 create mode 100644 libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java create mode 100644 
libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SystemExitInterceptorTests.java delete mode 100644 test/framework/licenses/byte-buddy-1.17.4.jar.sha1 create mode 100644 test/framework/licenses/byte-buddy-1.17.5.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 448e5361e3fc4..12421fb019bd2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added Kinesis support as a plugin for the pull-based ingestion ([#17615](https://github.com/opensearch-project/OpenSearch/pull/17615)) - [Security Manager Replacement] Create initial Java Agent to intercept Socket::connect calls ([#17724](https://github.com/opensearch-project/OpenSearch/pull/17724)) - Add ingestion management APIs for pause, resume and get ingestion state ([#17631](https://github.com/opensearch-project/OpenSearch/pull/17631)) +- [Security Manager Replacement] Enhance Java Agent to intercept System::exit ([#17746](https://github.com/opensearch-project/OpenSearch/pull/17746)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index cf4b66e2a2986..4d3aebf83eecc 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -72,7 +72,7 @@ junit = "4.13.2" hamcrest = "2.1" mockito = "5.16.1" objenesis = "3.3" -bytebuddy = "1.17.4" +bytebuddy = "1.17.5" # benchmark dependencies jmh = "1.35" diff --git a/libs/agent-sm/agent/build.gradle b/libs/agent-sm/agent/build.gradle index 266750f8d9529..a69dc057f2f9c 100644 --- a/libs/agent-sm/agent/build.gradle +++ b/libs/agent-sm/agent/build.gradle @@ -13,6 +13,9 @@ dependencies { implementation project(":libs:agent-sm:bootstrap") implementation "net.bytebuddy:byte-buddy:${versions.bytebuddy}" compileOnly "com.google.code.findbugs:jsr305:3.0.2" + + testImplementation "junit:junit:${versions.junit}" + testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" } var bootClasspath = configurations.bootstrap.incoming.artifactView { }.files @@ -35,8 +38,8 @@ compileJava { options.compilerArgs -= '-Werror' } -test.enabled = false testingConventions.enabled = false +tasks.named('forbiddenApisTest').configure { onlyIf { false } } tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' @@ -62,3 +65,12 @@ thirdPartyAudit { tasks.named('validateNebulaPom') { dependsOn prepareAgent } + +tasks.test { + dependsOn prepareAgent + jvmArgs += ["-javaagent:" + project.jar.archiveFile.get()] +} + +tasks.check { + dependsOn test +} diff --git a/libs/agent-sm/agent/licenses/byte-buddy-1.17.4.jar.sha1 b/libs/agent-sm/agent/licenses/byte-buddy-1.17.4.jar.sha1 deleted file mode 100644 index ebf5d77477f30..0000000000000 --- a/libs/agent-sm/agent/licenses/byte-buddy-1.17.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ffb8488d93290eff074fb542a596e4c5a26d0315 \ No newline at end of file diff --git a/libs/agent-sm/agent/licenses/byte-buddy-1.17.5.jar.sha1 b/libs/agent-sm/agent/licenses/byte-buddy-1.17.5.jar.sha1 new file mode 100644 index 0000000000000..d22afd953f340 --- /dev/null +++ b/libs/agent-sm/agent/licenses/byte-buddy-1.17.5.jar.sha1 @@ -0,0 +1 @@ +88450f120903b7e72470462cdbd2b75a3842223c \ No newline at end of file diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java index 6c5931f537956..4b65d841f9768 100644 --- 
a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java @@ -77,7 +77,13 @@ private static AgentBuilder createAgentBuilder(Instrumentation inst) throws Exce .with(AgentBuilder.TypeStrategy.Default.REDEFINE) .ignore(ElementMatchers.none()) .type(systemType) - .transform(transformer); + .transform(transformer) + .type(ElementMatchers.is(java.lang.System.class)) + .transform( + (b, typeDescription, classLoader, module, pd) -> b.visit( + Advice.to(SystemExitInterceptor.class).on(ElementMatchers.named("exit")) + ) + ); return agentBuilder; } diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java new file mode 100644 index 0000000000000..20087500f1df4 --- /dev/null +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.javaagent; + +import org.opensearch.javaagent.bootstrap.AgentPolicy; + +import java.lang.StackWalker.Option; + +import net.bytebuddy.asm.Advice; + +/** + * {@link System#exit} interceptor + */ +public class SystemExitInterceptor { + /** + * SystemExitInterceptor + */ + public SystemExitInterceptor() {} + + /** + * Interceptor + * @param code exit code + * @throws Exception exceptions + */ + @Advice.OnMethodEnter() + public static void intercept(int code) throws Exception { + final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); + final Class caller = walker.getCallerClass(); + + if (!AgentPolicy.isClassThatCanExit(caller.getName())) { + throw new SecurityException("The class " + caller + " is not allowed to call System::exit(" + code + ")"); + } + } +} diff --git a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SystemExitInterceptorTests.java b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SystemExitInterceptorTests.java new file mode 100644 index 0000000000000..de5f84fa68e6b --- /dev/null +++ b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SystemExitInterceptorTests.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.javaagent; + +import org.opensearch.javaagent.bootstrap.AgentPolicy; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.security.Policy; +import java.util.Set; + +public class SystemExitInterceptorTests { + @SuppressWarnings("removal") + @BeforeClass + public static void setUp() { + AgentPolicy.setPolicy(new Policy() { + }, Set.of(), new String[] { "worker.org.gradle.process.internal.worker.GradleWorkerMain" }); + } + + @Test(expected = SecurityException.class) + public void testSystemExitIsForbidden() { + System.exit(0); + } +} diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java index d2c77fac011b5..7f64646a0ca29 100644 --- a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java +++ b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java @@ -13,6 +13,7 @@ import java.security.Permission; import java.security.Policy; import java.security.ProtectionDomain; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Set; @@ -27,6 +28,7 @@ public class AgentPolicy { private static final Logger LOGGER = Logger.getLogger(AgentPolicy.class.getName()); private static volatile Policy policy; private static volatile Set trustedHosts; + private static volatile Set classesThatCanExit; private AgentPolicy() {} @@ -35,18 +37,20 @@ private AgentPolicy() {} * @param policy policy */ public static void setPolicy(Policy policy) { - setPolicy(policy, Set.of()); + setPolicy(policy, Set.of(), new String[0]); } /** * Set Agent policy * @param policy policy * @param trustedHosts trusted hosts + * @param classesThatCanExit classed that are allowed to call {@link System#exit} */ - public static void setPolicy(Policy policy, final Set trustedHosts) { + public static void setPolicy(Policy policy, final Set trustedHosts, final String[] classesThatCanExit) { if (AgentPolicy.policy == null) { AgentPolicy.policy = policy; AgentPolicy.trustedHosts = Collections.unmodifiableSet(trustedHosts); + AgentPolicy.classesThatCanExit = Arrays.stream(classesThatCanExit).collect(Collectors.toSet()); LOGGER.info("Policy attached successfully: " + policy); } else { throw new SecurityException("The Policy has been set already: " + AgentPolicy.policy); @@ -86,4 +90,13 @@ public static Policy getPolicy() { public static boolean isTrustedHost(String hostname) { return AgentPolicy.trustedHosts.contains(hostname); } + + /** + * Check if class is allowed to call {@link System#exit} + * @param name class name + * @return is class allowed to call {@link System#exit} or not + */ + public static boolean isClassThatCanExit(String name) { + return AgentPolicy.classesThatCanExit.contains(name); + } } diff --git a/test/framework/licenses/byte-buddy-1.17.4.jar.sha1 b/test/framework/licenses/byte-buddy-1.17.4.jar.sha1 deleted file mode 100644 index ebf5d77477f30..0000000000000 --- a/test/framework/licenses/byte-buddy-1.17.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ffb8488d93290eff074fb542a596e4c5a26d0315 \ No newline at end of file diff --git a/test/framework/licenses/byte-buddy-1.17.5.jar.sha1 b/test/framework/licenses/byte-buddy-1.17.5.jar.sha1 new file mode 100644 index 0000000000000..d22afd953f340 --- /dev/null +++ b/test/framework/licenses/byte-buddy-1.17.5.jar.sha1 @@ -0,0 +1 @@ +88450f120903b7e72470462cdbd2b75a3842223c \ No newline at end of file From 
4af0f712353b04074b50808c5e5468548dfe879f Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Tue, 1 Apr 2025 11:49:49 -0700 Subject: [PATCH 126/550] Increase default floor segment size to 16MB (#17699) Signed-off-by: Prudhvi Godithi --- CHANGELOG.md | 1 + .../opensearch/index/LogByteSizeMergePolicyProvider.java | 6 +++++- .../org/opensearch/index/TieredMergePolicyProvider.java | 8 +++++++- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 12421fb019bd2..d4ad8ef681b8a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) +- Increase the floor segment size to 16MB ([#17699](https://github.com/opensearch-project/OpenSearch/pull/17699)) ### Dependencies - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607), [#17669](https://github.com/opensearch-project/OpenSearch/pull/17669)) diff --git a/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java index 0b762d781957c..1b44f910ba51b 100644 --- a/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java +++ b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java @@ -50,7 +50,11 @@ public class LogByteSizeMergePolicyProvider implements MergePolicyProvider { private final Logger logger; private final boolean mergesEnabled; - public static final ByteSizeValue DEFAULT_MIN_MERGE = new ByteSizeValue(2, ByteSizeUnit.MB); + /** + * Use 16MB floor size to match Lucene default. + * See ... + */ + public static final ByteSizeValue DEFAULT_MIN_MERGE = new ByteSizeValue(16, ByteSizeUnit.MB); public static final int DEFAULT_MERGE_FACTOR = 10; public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); diff --git a/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java index d5d354c6c960a..2eeb25dee88c3 100644 --- a/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java +++ b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java @@ -136,7 +136,13 @@ public final class TieredMergePolicyProvider implements MergePolicyProvider { private final boolean mergesEnabled; public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d; - public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB); + + /** + * Use 16MB floor size to match Lucene default. + * See ... 
+ */ + public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(16, ByteSizeUnit.MB); + public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10; public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; From a64bfdbefd48deccabf1354ad293b5579840c69e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 16:21:45 -0400 Subject: [PATCH 127/550] Bump com.nimbusds:oauth2-oidc-sdk from 11.21 to 11.23.1 in /plugins/repository-azure (#17729) * Bump com.nimbusds:oauth2-oidc-sdk in /plugins/repository-azure Bumps [com.nimbusds:oauth2-oidc-sdk](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions) from 11.21 to 11.23.1. - [Changelog](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/src/master/CHANGELOG.txt) - [Commits](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/branches/compare/11.23.1..11.21) --- updated-dependencies: - dependency-name: com.nimbusds:oauth2-oidc-sdk dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- .../repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 | 1 - .../repository-azure/licenses/oauth2-oidc-sdk-11.23.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 create mode 100644 plugins/repository-azure/licenses/oauth2-oidc-sdk-11.23.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index d4ad8ef681b8a..6d0cf7bd73239 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.github.luben:zstd-jni` from 1.5.5-1 to 1.5.6-1 ([#17674](https://github.com/opensearch-project/OpenSearch/pull/17674)) - Bump `lycheeverse/lychee-action` from 2.3.0 to 2.4.0 ([#17731](https://github.com/opensearch-project/OpenSearch/pull/17731)) - Bump `com.netflix.nebula.ospackage-base` from 11.11.1 to 11.11.2 ([#17734](https://github.com/opensearch-project/OpenSearch/pull/17734)) +- Bump `com.nimbusds:oauth2-oidc-sdk` from 11.21 to 11.23.1 ([#17729](https://github.com/opensearch-project/OpenSearch/pull/17729)) ### Changed diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 8c61acd978f09..b8a674c6d8a6c 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -62,7 +62,7 @@ dependencies { api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" api 'com.microsoft.azure:msal4j:1.18.0' - api 'com.nimbusds:oauth2-oidc-sdk:11.21' + api 'com.nimbusds:oauth2-oidc-sdk:11.23.1' api 'com.nimbusds:nimbus-jose-jwt:10.0.2' api 'com.nimbusds:content-type:2.3' api 'com.nimbusds:lang-tag:1.7' diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 deleted file mode 100644 index 9736182141a0a..0000000000000 --- a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -97bec173d2a199fdd7f5c1f3a61f7ccc2e992fc1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.23.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.23.1.jar.sha1 new file mode 100644 index 0000000000000..127140477a3a0 --- /dev/null +++ b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.23.1.jar.sha1 @@ -0,0 +1 @@ +17facb3e3fa9e048f87b34c706e1163cad660e6d \ No newline at end of file From a586a6202853c15b8aa295550469b0dff418d588 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Tue, 1 Apr 2025 14:16:52 -0700 Subject: [PATCH 128/550] Add FilterFieldType (#17627) This class allows developers (in core or plugins) to wrap an existing field type, delegating all behavior by default, overriding specific methods as needed. --------- Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../index/mapper/FilterFieldTypeTest.java | 88 ++++++ .../index/mapper/FilterFieldType.java | 293 ++++++++++++++++++ .../index/mapper/MappedFieldType.java | 8 + 4 files changed, 390 insertions(+) create mode 100644 server/src/internalClusterTest/java/org/opensearch/index/mapper/FilterFieldTypeTest.java create mode 100644 server/src/main/java/org/opensearch/index/mapper/FilterFieldType.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d0cf7bd73239..ae440d2b6214b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) - Added Kinesis support as a plugin for the pull-based ingestion ([#17615](https://github.com/opensearch-project/OpenSearch/pull/17615)) +- Add FilterFieldType for developers who want to wrap MappedFieldType ([#17627](https://github.com/opensearch-project/OpenSearch/pull/17627)) - [Security Manager Replacement] Create initial Java Agent to intercept Socket::connect calls ([#17724](https://github.com/opensearch-project/OpenSearch/pull/17724)) - Add ingestion management APIs for pause, resume and get ingestion state ([#17631](https://github.com/opensearch-project/OpenSearch/pull/17631)) - [Security Manager Replacement] Enhance Java Agent to intercept System::exit ([#17746](https://github.com/opensearch-project/OpenSearch/pull/17746)) diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/FilterFieldTypeTest.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/FilterFieldTypeTest.java new file mode 100644 index 0000000000000..ad3293620d67c --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/FilterFieldTypeTest.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.opensearch.test.OpenSearchTestCase; + +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; + +public class FilterFieldTypeTest extends OpenSearchTestCase { + + private static final class MethodSignature { + private final String name; + private final Class returnType; + private final Class[] parameterTypes; + + public MethodSignature(String name, Class returnType, Class[] parameterTypes) { + this.name = name; + this.returnType = returnType; + this.parameterTypes = parameterTypes; + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) return false; + MethodSignature that = (MethodSignature) o; + return Objects.equals(name, that.name) + && Objects.equals(returnType, that.returnType) + && Objects.deepEquals(parameterTypes, that.parameterTypes); + } + + @Override + public int hashCode() { + return Objects.hash(name, returnType, Arrays.hashCode(parameterTypes)); + } + } + + private static final Set EXCLUDED_SIGNATURES = Set.of(new MethodSignature("typeName", String.class, new Class[0])); + + public void testAllMethodsDelegated() { + Method[] mappedFieldTypeMethods = MappedFieldType.class.getMethods(); + Method[] filterFieldTypeMethods = FilterFieldType.class.getMethods(); + + Set mappedFieldTypeMethodSignatures = new HashSet<>(); + for (Method method : mappedFieldTypeMethods) { + if (method.getDeclaringClass() == MappedFieldType.class + && Modifier.isFinal(method.getModifiers()) == false + && Modifier.isStatic(method.getModifiers()) == false) { + mappedFieldTypeMethodSignatures.add( + new MethodSignature(method.getName(), method.getReturnType(), method.getParameterTypes()) + ); + } + } + + Set filterFieldTypeMethodSignatures = new HashSet<>(); + for (Method method : filterFieldTypeMethods) { + if (method.getDeclaringClass() == FilterFieldType.class) { + filterFieldTypeMethodSignatures.add( + new MethodSignature(method.getName(), method.getReturnType(), method.getParameterTypes()) + ); + } + } + for (MethodSignature methodSignature : mappedFieldTypeMethodSignatures) { + if (filterFieldTypeMethodSignatures.contains(methodSignature)) { + assertFalse( + "Method " + methodSignature.name + " should NOT be implemented in " + FilterFieldType.class.getSimpleName(), + EXCLUDED_SIGNATURES.contains(methodSignature) + ); + } else { + assertTrue( + "Method " + methodSignature.name + " should be implemented in " + FilterFieldType.class.getSimpleName(), + EXCLUDED_SIGNATURES.contains(methodSignature) + ); + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/index/mapper/FilterFieldType.java b/server/src/main/java/org/opensearch/index/mapper/FilterFieldType.java new file mode 100644 index 0000000000000..5029dd471813e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/FilterFieldType.java @@ -0,0 +1,293 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.queries.intervals.IntervalsSource; +import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; +import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.Query; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.geo.ShapeRelation; +import org.opensearch.common.time.DateMathParser; +import org.opensearch.common.unit.Fuzziness; +import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.query.IntervalMode; +import org.opensearch.index.query.QueryRewriteContext; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * Wraps a {@link MappedFieldType}, delegating all methods (except typeName). + *

+ * Subclasses can extend this class to wrap an existing {@link MappedFieldType} to reuse most functionality, while + * customizing/modifying some specific behavior by overriding the relevant methods. + */ +@PublicApi(since = "3.0.0") +public abstract class FilterFieldType extends MappedFieldType { + protected final MappedFieldType delegate; + + public FilterFieldType(MappedFieldType delegate) { + super( + delegate.name(), + delegate.isSearchable(), + delegate.isStored(), + delegate.hasDocValues(), + delegate.getTextSearchInfo(), + delegate.meta() + ); + this.delegate = delegate; + } + + @Override + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + return delegate.valueFetcher(context, searchLookup, format); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + return delegate.termQuery(value, context); + } + + @Override + public String familyTypeName() { + return delegate.familyTypeName(); + } + + @Override + public String name() { + return delegate.name(); + } + + @Override + public float boost() { + return delegate.boost(); + } + + @Override + public void setBoost(float boost) { + delegate.setBoost(boost); + } + + @Override + public boolean hasDocValues() { + return delegate.hasDocValues(); + } + + @Override + public NamedAnalyzer indexAnalyzer() { + return delegate.indexAnalyzer(); + } + + @Override + public void setIndexAnalyzer(NamedAnalyzer analyzer) { + delegate.setIndexAnalyzer(analyzer); + } + + @Override + public Object valueForDisplay(Object value) { + return delegate.valueForDisplay(value); + } + + @Override + public boolean isSearchable() { + return delegate.isSearchable(); + } + + @Override + public boolean isStored() { + return delegate.isStored(); + } + + @Override + public Function pointReaderIfPossible() { + return delegate.pointReaderIfPossible(); + } + + @Override + public boolean isAggregatable() { + return delegate.isAggregatable(); + } + + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + return delegate.termQueryCaseInsensitive(value, context); + } + + @Override + public Query termsQuery(List values, QueryShardContext context) { + return delegate.termsQuery(values, context); + } + + @Override + public Query rangeQuery( + Object lowerTerm, + Object upperTerm, + boolean includeLower, + boolean includeUpper, + ShapeRelation relation, + ZoneId timeZone, + DateMathParser parser, + QueryShardContext context + ) { + return delegate.rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, relation, timeZone, parser, context); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + return delegate.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, method, context); + } + + @Override + public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, boolean caseInsensitve, QueryShardContext context) { + return delegate.prefixQuery(value, method, caseInsensitve, context); + } + + @Override + public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, boolean caseInsensitve, QueryShardContext context) { + return delegate.wildcardQuery(value, method, caseInsensitve, context); + } + + @Override + public Query normalizedWildcardQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) { + return 
delegate.normalizedWildcardQuery(value, method, context); + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + return delegate.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + } + + @Override + public Query existsQuery(QueryShardContext context) { + return delegate.existsQuery(context); + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + return delegate.phraseQuery(stream, slop, enablePositionIncrements); + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { + return delegate.phraseQuery(stream, slop, enablePositionIncrements, context); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + return delegate.multiPhraseQuery(stream, slop, enablePositionIncrements); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) + throws IOException { + return delegate.multiPhraseQuery(stream, slop, enablePositionIncrements, context); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) throws IOException { + return delegate.phrasePrefixQuery(stream, slop, maxExpansions); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { + return delegate.phrasePrefixQuery(stream, slop, maxExpansions, context); + } + + @Override + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { + return delegate.spanPrefixQuery(value, method, context); + } + + @Override + public Query distanceFeatureQuery(Object origin, String pivot, float boost, QueryShardContext context) { + return delegate.distanceFeatureQuery(origin, pivot, boost, context); + } + + @Override + public IntervalsSource intervals(String query, int max_gaps, IntervalMode mode, NamedAnalyzer analyzer, boolean prefix) + throws IOException { + return delegate.intervals(query, max_gaps, mode, analyzer, prefix); + } + + @Override + public Relation isFieldWithinQuery( + IndexReader reader, + Object from, + Object to, + boolean includeLower, + boolean includeUpper, + ZoneId timeZone, + DateMathParser dateMathParser, + QueryRewriteContext context + ) throws IOException { + return delegate.isFieldWithinQuery(reader, from, to, includeLower, includeUpper, timeZone, dateMathParser, context); + } + + @Override + public boolean eagerGlobalOrdinals() { + return delegate.eagerGlobalOrdinals(); + } + + @Override + public void setEagerGlobalOrdinals(boolean eagerGlobalOrdinals) { + delegate.setEagerGlobalOrdinals(eagerGlobalOrdinals); + } + + @Override + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { + return delegate.docValueFormat(format, timeZone); + } + + @Override + public Map meta() { + return delegate.meta(); + } + + @Override + public TextSearchInfo getTextSearchInfo() { + return delegate.getTextSearchInfo(); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { + return delegate.fielddataBuilder(fullyQualifiedIndexName, searchLookup); + 
} + + @Override + public MappedFieldType unwrap() { + return delegate.unwrap(); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index 4a0740dbf1b3c..81065a88c3001 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -521,4 +521,12 @@ public Map meta() { public TextSearchInfo getTextSearchInfo() { return textSearchInfo; } + + /** + * @return a concrete (unfiltered) field type, which should be the current instance + * if this is not a field type wrapper. See {@link FilterFieldType}. + */ + public MappedFieldType unwrap() { + return this; + } } From 928a168685edf30ecb56741291d01627ee32a937 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 16:14:03 -0500 Subject: [PATCH 129/550] Bump com.google.api.grpc:proto-google-common-protos from 2.52.0 to 2.54.1 in /plugins/repository-gcs (#17733) * Bump com.google.api.grpc:proto-google-common-protos Bumps [com.google.api.grpc:proto-google-common-protos](https://github.com/googleapis/sdk-platform-java) from 2.52.0 to 2.54.1. - [Release notes](https://github.com/googleapis/sdk-platform-java/releases) - [Changelog](https://github.com/googleapis/sdk-platform-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/sdk-platform-java/compare/v2.52.0...gax/v2.54.1) --- updated-dependencies: - dependency-name: com.google.api.grpc:proto-google-common-protos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Craig Perkins Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Craig Perkins --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 2 +- .../licenses/proto-google-common-protos-2.52.0.jar.sha1 | 1 - .../licenses/proto-google-common-protos-2.54.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-2.54.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index ae440d2b6214b..9ce7a1ecbb55e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `lycheeverse/lychee-action` from 2.3.0 to 2.4.0 ([#17731](https://github.com/opensearch-project/OpenSearch/pull/17731)) - Bump `com.netflix.nebula.ospackage-base` from 11.11.1 to 11.11.2 ([#17734](https://github.com/opensearch-project/OpenSearch/pull/17734)) - Bump `com.nimbusds:oauth2-oidc-sdk` from 11.21 to 11.23.1 ([#17729](https://github.com/opensearch-project/OpenSearch/pull/17729)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.52.0 to 2.54.1 ([#17733](https://github.com/opensearch-project/OpenSearch/pull/17733)) ### Changed diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index cae9a29dd341e..6d9b9635fa39c 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -56,7 +56,7 @@ dependencies { api 'com.google.api-client:google-api-client:2.7.0' - api 
'com.google.api.grpc:proto-google-common-protos:2.52.0' + api 'com.google.api.grpc:proto-google-common-protos:2.54.1' api 'com.google.api.grpc:proto-google-iam-v1:1.33.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 deleted file mode 100644 index d955f83389a2d..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.52.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f64c0540ed74ca464a4a025b32f967bd764bdbe \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.54.1.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.54.1.jar.sha1 new file mode 100644 index 0000000000000..a2cb686dc7bf6 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.54.1.jar.sha1 @@ -0,0 +1 @@ +aa342c093e2b75ecc341f28d2ee6c2b4480169c2 \ No newline at end of file From 137683e52844943c1e619584c2cb2dde7bb02eb4 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 2 Apr 2025 18:42:22 -0700 Subject: [PATCH 130/550] Skip list creation on stack walker (#17765) Minor optimization to avoid creating a collection when iteration is the only thing needed. Signed-off-by: Andrew Ross --- .../javaagent/SocketChannelInterceptor.java | 12 ++++++------ .../javaagent/StackCallerChainExtractor.java | 16 +++++++++------- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java index b3e0251c6f1b1..40b8118882b58 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java @@ -18,7 +18,7 @@ import java.net.UnixDomainSocketAddress; import java.security.Policy; import java.security.ProtectionDomain; -import java.util.List; +import java.util.stream.Stream; import net.bytebuddy.asm.Advice; import net.bytebuddy.asm.Advice.Origin; @@ -47,26 +47,26 @@ public static void intercept(@Advice.AllArguments Object[] args, @Origin Method } final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); - final List callers = walker.walk(new StackCallerChainExtractor()); + final Stream callers = walker.walk(StackCallerChainExtractor.INSTANCE); if (args[0] instanceof InetSocketAddress address) { if (!AgentPolicy.isTrustedHost(address.getHostString())) { final String host = address.getHostString() + ":" + address.getPort(); final SocketPermission permission = new SocketPermission(host, "connect,resolve"); - for (final ProtectionDomain domain : callers) { + callers.forEach(domain -> { if (!policy.implies(domain, permission)) { throw new SecurityException("Denied access to: " + host + ", domain " + domain); } - } + }); } } else if (args[0] instanceof UnixDomainSocketAddress address) { final NetPermission permission = new NetPermission("accessUnixDomainSocket"); - for (final ProtectionDomain domain : callers) { + callers.forEach(domain -> { if (!policy.implies(domain, permission)) { throw new SecurityException("Denied access to: " + address + ", domain " + domain); } - } + }); } } } diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java 
b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java index 6c33ca73e107d..3586f638edfdb 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java @@ -10,30 +10,32 @@ import java.lang.StackWalker.StackFrame; import java.security.ProtectionDomain; -import java.util.List; import java.util.function.Function; -import java.util.stream.Collectors; import java.util.stream.Stream; /** * Stack Caller Chain Extractor */ -public final class StackCallerChainExtractor implements Function, List> { +public final class StackCallerChainExtractor implements Function, Stream> { + /** + * Single instance of stateless class. + */ + public static final StackCallerChainExtractor INSTANCE = new StackCallerChainExtractor(); + /** * Constructor */ - public StackCallerChainExtractor() {} + private StackCallerChainExtractor() {} /** * Folds the stack * @param frames stack frames */ @Override - public List apply(Stream frames) { + public Stream apply(Stream frames) { return frames.map(StackFrame::getDeclaringClass) .map(Class::getProtectionDomain) .filter(pd -> pd.getCodeSource() != null) /* JDK */ - .distinct() - .collect(Collectors.toList()); + .distinct(); } } From 04db50a098af40819130f4c9b8e7f9675cf42512 Mon Sep 17 00:00:00 2001 From: Vinay Krishna Pudyodu Date: Wed, 2 Apr 2025 20:48:44 -0700 Subject: [PATCH 131/550] Support AutoExpand for SearchReplica (#17741) --- CHANGELOG.md | 1 + .../metadata/AutoExpandSearchReplicasIT.java | 74 +++++++ .../cluster/metadata/AutoExpandReplicas.java | 16 +- .../metadata/AutoExpandSearchReplicas.java | 180 ++++++++++++++++++ .../cluster/metadata/IndexMetadata.java | 2 + .../metadata/MetadataCreateIndexService.java | 5 +- .../MetadataUpdateSettingsService.java | 5 +- .../routing/allocation/AllocationService.java | 45 ++++- .../allocation/AwarenessReplicaBalance.java | 24 ++- .../common/settings/IndexScopedSettings.java | 1 + .../metadata/AutoExpandReplicasTests.java | 5 +- .../AutoExpandSearchReplicasTests.java | 141 ++++++++++++++ .../allocation/AllocationServiceTests.java | 119 ++++++++++++ .../AwarenessReplicaBalanceTests.java | 42 +++- .../test/OpenSearchIntegTestCase.java | 11 +- 15 files changed, 632 insertions(+), 39 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasIT.java create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicas.java create mode 100644 server/src/test/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ce7a1ecbb55e..53cb18c04dde1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Security Manager Replacement] Create initial Java Agent to intercept Socket::connect calls ([#17724](https://github.com/opensearch-project/OpenSearch/pull/17724)) - Add ingestion management APIs for pause, resume and get ingestion state ([#17631](https://github.com/opensearch-project/OpenSearch/pull/17631)) - [Security Manager Replacement] Enhance Java Agent to intercept System::exit ([#17746](https://github.com/opensearch-project/OpenSearch/pull/17746)) +- Support AutoExpand for SearchReplica ([#17741](https://github.com/opensearch-project/OpenSearch/pull/17741)) ### Changed - Migrate BC libs to their FIPS counterparts 
([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasIT.java new file mode 100644 index 0000000000000..c177b01fea642 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasIT.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class AutoExpandSearchReplicasIT extends RemoteStoreBaseIntegTestCase { + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); + } + + public void testAutoExpandSearchReplica() throws Exception { + String indexName = "test"; + internalCluster().startClusterManagerOnlyNode(); + + // Create a cluster with 2 data nodes and 1 search node + internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNode(); + String searchNode = internalCluster().startSearchOnlyNode(); + + // Create index with 1 primary, 1 replica and 1 search replica shards + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(0)) + .build() + ); + ensureGreen(); + + assertBusy(() -> assertEquals(1, getNumShards(indexName).numSearchReplicas)); + + // Enable auto expand for search replica + client().admin() + .indices() + .prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put("index.auto_expand_search_replicas", "0-all")) + .get(); + + // Add 1 more search nodes + internalCluster().startSearchOnlyNode(); + + assertBusy(() -> assertEquals(2, getNumShards(indexName).numSearchReplicas)); + + // Stop a node which hosts search replica + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(searchNode)); + assertBusy(() -> assertEquals(1, getNumShards(indexName).numSearchReplicas)); + + // Add 1 more search nodes + internalCluster().startSearchOnlyNode(); + assertBusy(() -> assertEquals(2, getNumShards(indexName).numSearchReplicas)); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java b/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java index bd31350780d72..d9a3d7bf8eb3f 100644 --- 
a/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java @@ -31,7 +31,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.Booleans; @@ -142,13 +141,14 @@ public boolean isEnabled() { private OptionalInt getDesiredNumberOfReplicas(IndexMetadata indexMetadata, RoutingAllocation allocation) { if (enabled) { - int numMatchingDataNodes = 0; - for (final DiscoveryNode cursor : allocation.nodes().getDataNodes().values()) { - Decision decision = allocation.deciders().shouldAutoExpandToNode(indexMetadata, cursor, allocation); - if (decision.type() != Decision.Type.NO) { - numMatchingDataNodes++; - } - } + int numMatchingDataNodes = (int) allocation.nodes() + .getDataNodes() + .values() + .stream() + .filter(node -> node.isSearchNode() == false) + .map(node -> allocation.deciders().shouldAutoExpandToNode(indexMetadata, node, allocation)) + .filter(decision -> decision.type() != Decision.Type.NO) + .count(); final int min = getMinReplicas(); final int max = getMaxReplicas(numMatchingDataNodes); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicas.java b/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicas.java new file mode 100644 index 0000000000000..91608d46aff20 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicas.java @@ -0,0 +1,180 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.Booleans; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.OptionalInt; + +import static org.opensearch.cluster.metadata.MetadataIndexStateService.isIndexVerifiedBeforeClosed; + +/** + * This class acts as a functional wrapper around the {@code index.auto_expand_search_replicas} setting. + * This setting's value expands into a minimum and maximum value, requiring special handling based on the + * number of search nodes in the cluster. This class handles parsing and simplifies access to these values. 
+ * + * @opensearch.internal + */ +public final class AutoExpandSearchReplicas { + // the value we recognize in the "max" position to mean all the search nodes + private static final String ALL_NODES_VALUE = "all"; + + private static final AutoExpandSearchReplicas FALSE_INSTANCE = new AutoExpandSearchReplicas(0, 0, false); + + public static final Setting SETTING = new Setting<>( + IndexMetadata.SETTING_AUTO_EXPAND_SEARCH_REPLICAS, + "false", + AutoExpandSearchReplicas::parse, + Property.Dynamic, + Property.IndexScope + ); + + private static AutoExpandSearchReplicas parse(String value) { + final int min; + final int max; + if (Booleans.isFalse(value)) { + return FALSE_INSTANCE; + } + final int dash = value.indexOf('-'); + if (-1 == dash) { + throw new IllegalArgumentException( + "failed to parse [" + IndexMetadata.SETTING_AUTO_EXPAND_SEARCH_REPLICAS + "] from value: [" + value + "] at index " + dash + ); + } + final String sMin = value.substring(0, dash); + try { + min = Integer.parseInt(sMin); + } catch (NumberFormatException e) { + throw new IllegalArgumentException( + "failed to parse [" + IndexMetadata.SETTING_AUTO_EXPAND_SEARCH_REPLICAS + "] from value: [" + value + "] at index " + dash, + e + ); + } + String sMax = value.substring(dash + 1); + if (sMax.equals(ALL_NODES_VALUE)) { + max = Integer.MAX_VALUE; + } else { + try { + max = Integer.parseInt(sMax); + } catch (NumberFormatException e) { + throw new IllegalArgumentException( + "failed to parse [" + + IndexMetadata.SETTING_AUTO_EXPAND_SEARCH_REPLICAS + + "] from value: [" + + value + + "] at index " + + dash, + e + ); + } + } + return new AutoExpandSearchReplicas(min, max, true); + } + + private final int minSearchReplicas; + private final int maxSearchReplicas; + private final boolean enabled; + + private AutoExpandSearchReplicas(int minReplicas, int maxReplicas, boolean enabled) { + if (minReplicas > maxReplicas) { + throw new IllegalArgumentException( + "[" + + IndexMetadata.SETTING_AUTO_EXPAND_SEARCH_REPLICAS + + "] minSearchReplicas must be =< maxSearchReplicas but wasn't " + + minReplicas + + " > " + + maxReplicas + ); + } + this.minSearchReplicas = minReplicas; + this.maxSearchReplicas = maxReplicas; + this.enabled = enabled; + } + + int getMinSearchReplicas() { + return minSearchReplicas; + } + + public int getMaxSearchReplicas() { + return maxSearchReplicas; + } + + public boolean isEnabled() { + return enabled; + } + + private OptionalInt getDesiredNumberOfSearchReplicas(IndexMetadata indexMetadata, RoutingAllocation allocation) { + int numMatchingSearchNodes = (int) allocation.nodes() + .getDataNodes() + .values() + .stream() + .filter(DiscoveryNode::isSearchNode) + .map(node -> allocation.deciders().shouldAutoExpandToNode(indexMetadata, node, allocation)) + .filter(decision -> decision.type() != Decision.Type.NO) + .count(); + + return calculateNumberOfSearchReplicas(numMatchingSearchNodes); + } + + // package private for testing + OptionalInt calculateNumberOfSearchReplicas(int numMatchingSearchNodes) { + // Calculate the maximum possible number of search replicas + int maxPossibleReplicas = Math.min(numMatchingSearchNodes, maxSearchReplicas); + + // Determine the number of search replicas + int numberOfSearchReplicas = Math.max(minSearchReplicas, maxPossibleReplicas); + + // Additional check to ensure we don't exceed max possible search replicas + if (numberOfSearchReplicas <= maxPossibleReplicas) { + return OptionalInt.of(numberOfSearchReplicas); + } + + return OptionalInt.empty(); + } + + @Override + public String 
toString() { + return enabled ? minSearchReplicas + "-" + maxSearchReplicas : "false"; + } + + /** + * Checks if there are search replicas with the auto-expand feature that need to be adapted. + * Returns a map of updates, which maps the indices to be updated to the desired number of search replicas. + * The map has the desired number of search replicas as key and the indices to update as value, as this allows the result + * of this method to be directly applied to RoutingTable.Builder#updateNumberOfSearchReplicas. + */ + public static Map> getAutoExpandSearchReplicaChanges(Metadata metadata, RoutingAllocation allocation) { + Map> updatedSearchReplicas = new HashMap<>(); + + for (final IndexMetadata indexMetadata : metadata) { + if (indexMetadata.getState() == IndexMetadata.State.OPEN || isIndexVerifiedBeforeClosed(indexMetadata)) { + AutoExpandSearchReplicas autoExpandSearchReplicas = SETTING.get(indexMetadata.getSettings()); + if (autoExpandSearchReplicas.isEnabled()) { + autoExpandSearchReplicas.getDesiredNumberOfSearchReplicas(indexMetadata, allocation) + .ifPresent(numberOfSearchReplicas -> { + if (numberOfSearchReplicas != indexMetadata.getNumberOfSearchOnlyReplicas()) { + updatedSearchReplicas.computeIfAbsent(numberOfSearchReplicas, ArrayList::new) + .add(indexMetadata.getIndex().getName()); + } + }); + } + } + } + return updatedSearchReplicas; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 72afd44eadef8..9005c830167f9 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -492,7 +492,9 @@ public Iterator> settings() { ); public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; + public static final String SETTING_AUTO_EXPAND_SEARCH_REPLICAS = "index.auto_expand_search_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; + public static final Setting INDEX_AUTO_EXPAND_SEARCH_REPLICAS_SETTING = AutoExpandSearchReplicas.SETTING; /** * Blocks the API. 
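A minimal sketch of how the new index.auto_expand_search_replicas value is parsed and applied, based on the AutoExpandSearchReplicas class and its tests above; the example class and main method are hypothetical and exist only for illustration, and the behavior notes mirror calculateNumberOfSearchReplicas.

import org.opensearch.cluster.metadata.AutoExpandSearchReplicas;
import org.opensearch.common.settings.Settings;

// Hypothetical illustration class; not part of this change.
public class AutoExpandSearchReplicasExample {
    public static void main(String[] args) {
        // "0-all" enables auto-expansion with min=0 and max=Integer.MAX_VALUE.
        Settings indexSettings = Settings.builder()
            .put("index.auto_expand_search_replicas", "0-all")
            .build();
        AutoExpandSearchReplicas autoExpand = AutoExpandSearchReplicas.SETTING.get(indexSettings);
        // On each reroute, the number of search replicas becomes
        // max(minSearchReplicas, min(maxSearchReplicas, matching search nodes)),
        // and is left unchanged when fewer matching search nodes than the minimum exist.
        System.out.println(autoExpand.isEnabled());            // true
        System.out.println(autoExpand.getMaxSearchReplicas()); // 2147483647
    }
}

As in AutoExpandSearchReplicasIT above, the same value can also be applied dynamically to an existing index through an update-settings request.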
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 3483c14df6272..76b2b948ca164 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1505,7 +1505,10 @@ List getIndexSettingsValidationErrors( Optional replicaValidationError = awarenessReplicaBalance.validate(replicaCount, autoExpandReplica); replicaValidationError.ifPresent(validationErrors::add); - Optional searchReplicaValidationError = awarenessReplicaBalance.validate(searchReplicaCount); + Optional searchReplicaValidationError = awarenessReplicaBalance.validate( + searchReplicaCount, + AutoExpandSearchReplicas.SETTING.get(settings) + ); searchReplicaValidationError.ifPresent(validationErrors::add); } return validationErrors; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index fff704210ca7a..8eff5604045bc 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -304,7 +304,10 @@ public ClusterState execute(ClusterState currentState) { for (Index index : request.indices()) { if (index.getName().charAt(0) != '.') { // No replica count validation for system indices - Optional error = awarenessReplicaBalance.validate(updatedNumberOfSearchReplicas); + Optional error = awarenessReplicaBalance.validate( + updatedNumberOfSearchReplicas, + AutoExpandSearchReplicas.SETTING.get(openSettings) + ); if (error.isPresent()) { ValidationException ex = new ValidationException(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java index 78f17c9ff212b..efe51e36ec748 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.health.ClusterStateHealth; import org.opensearch.cluster.metadata.AutoExpandReplicas; +import org.opensearch.cluster.metadata.AutoExpandSearchReplicas; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -373,11 +374,19 @@ public ClusterState adaptAutoExpandReplicas(ClusterState clusterState) { clusterState.metadata(), allocation ); - if (autoExpandReplicaChanges.isEmpty()) { + + final Map> autoExpandSearchReplicaChanges = AutoExpandSearchReplicas.getAutoExpandSearchReplicaChanges( + clusterState.metadata(), + allocation + ); + + if (autoExpandReplicaChanges.isEmpty() && autoExpandSearchReplicaChanges.isEmpty()) { return clusterState; } else { final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); final Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); + final Set updatedIndices = new HashSet<>(); + for (Map.Entry> entry : autoExpandReplicaChanges.entrySet()) { final int numberOfReplicas = entry.getKey(); final String[] indices = 
entry.getValue().toArray(new String[0]); @@ -385,21 +394,36 @@ public ClusterState adaptAutoExpandReplicas(ClusterState clusterState) { // operation which make these copies stale routingTableBuilder.updateNumberOfReplicas(numberOfReplicas, indices); metadataBuilder.updateNumberOfReplicas(numberOfReplicas, indices); - // update settings version for each index - for (final String index : indices) { - final IndexMetadata indexMetadata = metadataBuilder.get(index); - final IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexMetadata).settingsVersion( - 1 + indexMetadata.getSettingsVersion() - ); - metadataBuilder.put(indexMetadataBuilder); - } + updatedIndices.addAll(Set.of(indices)); logger.info("updating number_of_replicas to [{}] for indices {}", numberOfReplicas, indices); } + + for (Map.Entry> entry : autoExpandSearchReplicaChanges.entrySet()) { + final int numberOfSearchReplicas = entry.getKey(); + final String[] indices = entry.getValue().toArray(new String[0]); + // we do *not* update the in sync allocation ids as they will be removed upon the first index + // operation which make these copies stale + routingTableBuilder.updateNumberOfSearchReplicas(numberOfSearchReplicas, indices); + metadataBuilder.updateNumberOfSearchReplicas(numberOfSearchReplicas, indices); + updatedIndices.addAll(Set.of(indices)); + logger.info("updating number_of_search_replicas to [{}] for indices {}", numberOfSearchReplicas, indices); + } + + // update settings version for each updated index + for (final String index : updatedIndices) { + final IndexMetadata indexMetadata = metadataBuilder.get(index); + final IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexMetadata).settingsVersion( + 1 + indexMetadata.getSettingsVersion() + ); + metadataBuilder.put(indexMetadataBuilder); + } + final ClusterState fixedState = ClusterState.builder(clusterState) .routingTable(routingTableBuilder.build()) .metadata(metadataBuilder) .build(); assert AutoExpandReplicas.getAutoExpandReplicaChanges(fixedState.metadata(), allocation).isEmpty(); + assert AutoExpandSearchReplicas.getAutoExpandSearchReplicaChanges(fixedState.metadata(), allocation).isEmpty(); return fixedState; } } @@ -567,6 +591,9 @@ private void reroute(RoutingAllocation allocation) { assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. 
See disassociateDeadNodes"; assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), allocation).isEmpty() : "auto-expand replicas out of sync with number of nodes in the cluster"; + assert AutoExpandSearchReplicas.getAutoExpandSearchReplicaChanges(allocation.metadata(), allocation).isEmpty() + : "auto-expand search replicas out of sync with number of search nodes in the cluster"; + assert assertInitialized(); long rerouteStartTimeNS = System.nanoTime(); removeDelayMarkers(allocation); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java index d2cf30bd31983..538d49d4e4701 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java @@ -9,6 +9,7 @@ package org.opensearch.cluster.routing.allocation; import org.opensearch.cluster.metadata.AutoExpandReplicas; +import org.opensearch.cluster.metadata.AutoExpandSearchReplicas; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -122,13 +123,22 @@ public Optional validate(int replicaCount, AutoExpandReplicas autoExpand return Optional.empty(); } - public Optional validate(int searchReplicaCount) { - // TODO: For now Search replicas do not support auto expand, when we add support update this validation - if (searchReplicaCount > 0 && searchReplicaCount % maxAwarenessAttributes() != 0) { - String errorMessage = "total search replicas needs to be a multiple of total awareness attributes [" - + maxAwarenessAttributes() - + "]"; - return Optional.of(errorMessage); + public Optional validate(int searchReplicaCount, AutoExpandSearchReplicas autoExpandSearchReplicas) { + if (autoExpandSearchReplicas.isEnabled()) { + if ((autoExpandSearchReplicas.getMaxSearchReplicas() != Integer.MAX_VALUE) + && ((autoExpandSearchReplicas.getMaxSearchReplicas()) % maxAwarenessAttributes() != 0)) { + String errorMessage = "expected max cap on auto expand search replicas to be a multiple of total awareness attributes [" + + maxAwarenessAttributes() + + "]"; + return Optional.of(errorMessage); + } + } else { + if (searchReplicaCount > 0 && searchReplicaCount % maxAwarenessAttributes() != 0) { + String errorMessage = "total search replicas needs to be a multiple of total awareness attributes [" + + maxAwarenessAttributes() + + "]"; + return Optional.of(errorMessage); + } } return Optional.empty(); } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 14cd7479866d2..3793b9b09e3b2 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -98,6 +98,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING, IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING, IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING, + IndexMetadata.INDEX_AUTO_EXPAND_SEARCH_REPLICAS_SETTING, IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING, IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING, IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING, diff --git 
a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java index 4b7eaf0272a91..3c1bcf8449458 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java @@ -52,7 +52,6 @@ import java.util.ArrayList; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Set; @@ -123,10 +122,8 @@ public void testInvalidValues() { private static final AtomicInteger nodeIdGenerator = new AtomicInteger(); protected DiscoveryNode createNode(Version version, DiscoveryNodeRole... mustHaveRoles) { - Set roles = new HashSet<>(randomSubsetOf(DiscoveryNodeRole.BUILT_IN_ROLES)); - Collections.addAll(roles, mustHaveRoles); final String id = String.format(Locale.ROOT, "node_%03d", nodeIdGenerator.incrementAndGet()); - return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, version); + return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), Set.of(mustHaveRoles), version); } protected DiscoveryNode createNode(DiscoveryNodeRole... mustHaveRoles) { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasTests.java b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasTests.java new file mode 100644 index 0000000000000..bfc0ec748c7d0 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasTests.java @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.cluster.metadata; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.common.settings.Settings; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.OptionalInt; + +public class AutoExpandSearchReplicasTests extends OpenSearchAllocationTestCase { + + public void testParseAutoExpandSearchReplicaSettings() { + AutoExpandSearchReplicas autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get( + Settings.builder().put("index.auto_expand_search_replicas", "0-5").build() + ); + assertEquals(0, autoExpandSearchReplicas.getMinSearchReplicas()); + assertEquals(5, autoExpandSearchReplicas.getMaxSearchReplicas()); + + autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get( + Settings.builder().put("index.auto_expand_search_replicas", "0-all").build() + ); + assertEquals(0, autoExpandSearchReplicas.getMinSearchReplicas()); + assertEquals(Integer.MAX_VALUE, autoExpandSearchReplicas.getMaxSearchReplicas()); + + autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get( + Settings.builder().put("index.auto_expand_search_replicas", "1-all").build() + ); + assertEquals(1, autoExpandSearchReplicas.getMinSearchReplicas()); + assertEquals(Integer.MAX_VALUE, autoExpandSearchReplicas.getMaxSearchReplicas()); + } + + public void testInvalidValues() { + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> { + AutoExpandSearchReplicas.SETTING.get(Settings.builder().put("index.auto_expand_search_replicas", "boom").build()); + }); + assertEquals("failed to parse [index.auto_expand_search_replicas] from value: [boom] at index -1", throwable.getMessage()); + + throwable = assertThrows(IllegalArgumentException.class, () -> { + AutoExpandSearchReplicas.SETTING.get(Settings.builder().put("index.auto_expand_search_replicas", "1-boom").build()); + }); + assertEquals("failed to parse [index.auto_expand_search_replicas] from value: [1-boom] at index 1", throwable.getMessage()); + assertEquals("For input string: \"boom\"", throwable.getCause().getMessage()); + + throwable = assertThrows(IllegalArgumentException.class, () -> { + AutoExpandSearchReplicas.SETTING.get(Settings.builder().put("index.auto_expand_search_replicas", "boom-1").build()); + }); + assertEquals("failed to parse [index.auto_expand_search_replicas] from value: [boom-1] at index 4", throwable.getMessage()); + assertEquals("For input string: \"boom\"", throwable.getCause().getMessage()); + + throwable = assertThrows(IllegalArgumentException.class, () -> { + AutoExpandSearchReplicas.SETTING.get(Settings.builder().put("index.auto_expand_search_replicas", "2-1").build()); + }); + assertEquals( + "[index.auto_expand_search_replicas] minSearchReplicas must be =< maxSearchReplicas but wasn't 2 > 1", + throwable.getMessage() + ); + } + + public void testCalculateNumberOfSearchReplicas() { + // when the number of matching search nodes is lesser than the maximum value of auto-expand + AutoExpandSearchReplicas autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get( + Settings.builder().put("index.auto_expand_search_replicas", "0-all").build() + ); + assertEquals(OptionalInt.of(5), 
autoExpandSearchReplicas.calculateNumberOfSearchReplicas(5)); + + // when the number of matching search nodes is equal to the maximum value of auto-expand + autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get( + Settings.builder().put("index.auto_expand_search_replicas", "0-5").build() + ); + assertEquals(OptionalInt.of(5), autoExpandSearchReplicas.calculateNumberOfSearchReplicas(5)); + + // when the number of matching search nodes is equal to the minimum value of auto-expand + autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get( + Settings.builder().put("index.auto_expand_search_replicas", "0-5").build() + ); + assertEquals(OptionalInt.of(0), autoExpandSearchReplicas.calculateNumberOfSearchReplicas(0)); + + // when the number of matching search nodes is greater than the maximum value of auto-expand + autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get( + Settings.builder().put("index.auto_expand_search_replicas", "0-5").build() + ); + assertEquals(OptionalInt.of(5), autoExpandSearchReplicas.calculateNumberOfSearchReplicas(8)); + + // when the number of matching search nodes is lesser than the minimum value of auto-expand, + // then the number of search replicas remains unchanged + autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get( + Settings.builder().put("index.auto_expand_search_replicas", "2-5").build() + ); + assertEquals(OptionalInt.empty(), autoExpandSearchReplicas.calculateNumberOfSearchReplicas(1)); + } + + public void testGetAutoExpandReplicaChanges() { + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder("test") + .settings(settings(Version.CURRENT).put("index.auto_expand_search_replicas", "0-all")) + .numberOfShards(1) + .numberOfReplicas(0) + .numberOfSearchReplicas(1) + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .nodes( + DiscoveryNodes.builder() + .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Collections.emptyMap(), SEARCH_ROLE, Version.CURRENT)) + .add(new DiscoveryNode("node2", buildNewFakeTransportAddress(), Collections.emptyMap(), SEARCH_ROLE, Version.CURRENT)) + .add(new DiscoveryNode("node3", buildNewFakeTransportAddress(), Collections.emptyMap(), SEARCH_ROLE, Version.CURRENT)) + .build() + ) + .build(); + + RoutingAllocation allocation = new RoutingAllocation( + yesAllocationDeciders(), + clusterState.getRoutingNodes(), + clusterState, + null, + null, + System.nanoTime() + ); + + assertEquals(Map.of(3, List.of("test")), AutoExpandSearchReplicas.getAutoExpandSearchReplicaChanges(metadata, allocation)); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java index cce75105dd33f..b1f4b45bb2441 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import 
org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; @@ -48,9 +49,11 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.opensearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.common.settings.ClusterSettings; @@ -66,12 +69,16 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_SEARCH_REPLICAS; import static org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus.DECIDERS_NO; import static org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING; import static org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING; @@ -445,4 +452,116 @@ private static ClusterState rerouteAndStartShards(final AllocationService alloca ); } + public void testAdaptAutoExpandReplicasWhenAutoExpandChangesExists() { + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + String indexName = "index1"; + Set SEARCH_NODE_ROLE = new HashSet<>(List.of(DiscoveryNodeRole.SEARCH_ROLE)); + Set DATA_NODE_ROLE = new HashSet<>(List.of(DiscoveryNodeRole.DATA_ROLE)); + + nodesBuilder.add( + new DiscoveryNode("node1", buildNewFakeTransportAddress(), Collections.emptyMap(), DATA_NODE_ROLE, Version.CURRENT) + ); + nodesBuilder.add( + new DiscoveryNode("node2", buildNewFakeTransportAddress(), Collections.emptyMap(), DATA_NODE_ROLE, Version.CURRENT) + ); + nodesBuilder.add( + new DiscoveryNode("node3", buildNewFakeTransportAddress(), Collections.emptyMap(), DATA_NODE_ROLE, Version.CURRENT) + ); + nodesBuilder.add( + new DiscoveryNode("node4", buildNewFakeTransportAddress(), Collections.emptyMap(), SEARCH_NODE_ROLE, Version.CURRENT) + ); + nodesBuilder.add( + new DiscoveryNode("node5", buildNewFakeTransportAddress(), Collections.emptyMap(), SEARCH_NODE_ROLE, Version.CURRENT) + ); + + Metadata.Builder metadataBuilder = Metadata.builder() + .put( + IndexMetadata.builder(indexName) + .settings( + settings(Version.CURRENT).put(SETTING_AUTO_EXPAND_REPLICAS, "0-all") + .put(SETTING_AUTO_EXPAND_SEARCH_REPLICAS, "0-all") + ) + .numberOfShards(1) + .numberOfReplicas(1) + .numberOfSearchReplicas(1) + ); + + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder().addAsRecovery(metadataBuilder.get("index1")); + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(nodesBuilder) + .metadata(metadataBuilder) + 
.routingTable(routingTableBuilder.build()) + .build(); + final AllocationService allocationService = new AllocationService( + new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + + ClusterState updatedClusterState = allocationService.adaptAutoExpandReplicas(clusterState); + assertEquals(2, updatedClusterState.routingTable().index(indexName).shard(0).writerReplicas().size()); + assertEquals(2, updatedClusterState.routingTable().index(indexName).shard(0).searchOnlyReplicas().size()); + assertEquals(2, updatedClusterState.metadata().index(indexName).getNumberOfReplicas()); + assertEquals(2, updatedClusterState.metadata().index(indexName).getNumberOfSearchOnlyReplicas()); + assertNotEquals(updatedClusterState, clusterState); + assertEquals( + clusterState.metadata().index(indexName).getSettingsVersion() + 1, + updatedClusterState.metadata().index(indexName).getSettingsVersion() + ); + } + + public void testAdaptAutoExpandReplicasWhenAutoExpandChangesNotExists() { + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + String indexName = "index1"; + Set SEARCH_NODE_ROLE = Set.of(DiscoveryNodeRole.SEARCH_ROLE); + Set DATA_NODE_ROLE = Set.of(DiscoveryNodeRole.DATA_ROLE); + + nodesBuilder.add( + new DiscoveryNode("node1", buildNewFakeTransportAddress(), Collections.emptyMap(), DATA_NODE_ROLE, Version.CURRENT) + ); + nodesBuilder.add( + new DiscoveryNode("node2", buildNewFakeTransportAddress(), Collections.emptyMap(), DATA_NODE_ROLE, Version.CURRENT) + ); + nodesBuilder.add( + new DiscoveryNode("node3", buildNewFakeTransportAddress(), Collections.emptyMap(), DATA_NODE_ROLE, Version.CURRENT) + ); + nodesBuilder.add( + new DiscoveryNode("node4", buildNewFakeTransportAddress(), Collections.emptyMap(), SEARCH_NODE_ROLE, Version.CURRENT) + ); + nodesBuilder.add( + new DiscoveryNode("node5", buildNewFakeTransportAddress(), Collections.emptyMap(), SEARCH_NODE_ROLE, Version.CURRENT) + ); + + Metadata.Builder metadataBuilder = Metadata.builder() + .put( + IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1) + .numberOfSearchReplicas(1) + ); + + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder().addAsRecovery(metadataBuilder.get("index1")); + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(nodesBuilder) + .metadata(metadataBuilder) + .routingTable(routingTableBuilder.build()) + .build(); + final AllocationService allocationService = new AllocationService( + new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + + ClusterState updatedClusterState = allocationService.adaptAutoExpandReplicas(clusterState); + assertEquals(1, updatedClusterState.routingTable().index(indexName).shard(0).writerReplicas().size()); + assertEquals(1, updatedClusterState.routingTable().index(indexName).shard(0).searchOnlyReplicas().size()); + assertEquals(1, updatedClusterState.metadata().index(indexName).getNumberOfReplicas()); + assertEquals(1, updatedClusterState.metadata().index(indexName).getNumberOfSearchOnlyReplicas()); + assertEquals(updatedClusterState, clusterState); + } } diff --git 
a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java index c6134330727aa..aecd721542127 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java @@ -10,6 +10,7 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.AutoExpandReplicas; +import org.opensearch.cluster.metadata.AutoExpandSearchReplicas; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -17,6 +18,8 @@ import java.util.Optional; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_SEARCH_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.hamcrest.Matchers.equalTo; public class AwarenessReplicaBalanceTests extends OpenSearchAllocationTestCase { @@ -30,6 +33,7 @@ public void testNoForcedAwarenessAttribute() { Settings settings = Settings.builder() .put("cluster.routing.allocation.awareness.attributes", "rack_id") .put(SETTING_AUTO_EXPAND_REPLICAS, "0-1") + .put(SETTING_AUTO_EXPAND_SEARCH_REPLICAS, "0-1") .build(); AutoExpandReplicas autoExpandReplica = AutoExpandReplicas.SETTING.get(settings); AwarenessReplicaBalance awarenessReplicaBalance = new AwarenessReplicaBalance(settings, EMPTY_CLUSTER_SETTINGS); @@ -38,8 +42,9 @@ public void testNoForcedAwarenessAttribute() { assertEquals(awarenessReplicaBalance.validate(0, autoExpandReplica), Optional.empty()); assertEquals(awarenessReplicaBalance.validate(1, autoExpandReplica), Optional.empty()); - assertEquals(awarenessReplicaBalance.validate(0), Optional.empty()); - assertEquals(awarenessReplicaBalance.validate(1), Optional.empty()); + AutoExpandSearchReplicas autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get(settings); + assertEquals(awarenessReplicaBalance.validate(0, autoExpandSearchReplicas), Optional.empty()); + assertEquals(awarenessReplicaBalance.validate(1, autoExpandSearchReplicas), Optional.empty()); } public void testForcedAwarenessAttribute() { @@ -50,6 +55,7 @@ public void testForcedAwarenessAttribute() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) .put(SETTING_AUTO_EXPAND_REPLICAS, "0-2") + .put(SETTING_AUTO_EXPAND_SEARCH_REPLICAS, "0-3") .build(); AwarenessReplicaBalance awarenessReplicaBalance = new AwarenessReplicaBalance(settings, EMPTY_CLUSTER_SETTINGS); @@ -59,6 +65,9 @@ public void testForcedAwarenessAttribute() { assertEquals(awarenessReplicaBalance.validate(1, autoExpandReplica), Optional.empty()); assertEquals(awarenessReplicaBalance.validate(0, autoExpandReplica), Optional.empty()); + AutoExpandSearchReplicas autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get(settings); + assertEquals(awarenessReplicaBalance.validate(1, autoExpandSearchReplicas), Optional.empty()); + // When auto expand replica settings is passed as max cap settings = Settings.builder() 
.put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") @@ -66,6 +75,7 @@ public void testForcedAwarenessAttribute() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) .put(SETTING_AUTO_EXPAND_REPLICAS, "0-all") + .put(SETTING_AUTO_EXPAND_SEARCH_REPLICAS, "0-all") .build(); awarenessReplicaBalance = new AwarenessReplicaBalance(settings, EMPTY_CLUSTER_SETTINGS); @@ -76,6 +86,9 @@ public void testForcedAwarenessAttribute() { assertEquals(awarenessReplicaBalance.validate(1, autoExpandReplica), Optional.empty()); assertEquals(awarenessReplicaBalance.validate(0, autoExpandReplica), Optional.empty()); + autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get(settings); + assertEquals(awarenessReplicaBalance.validate(1, autoExpandSearchReplicas), Optional.empty()); + // when auto expand is not valid set as per zone awareness settings = Settings.builder() .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") @@ -83,6 +96,7 @@ public void testForcedAwarenessAttribute() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) .put(SETTING_AUTO_EXPAND_REPLICAS, "0-1") + .put(SETTING_AUTO_EXPAND_SEARCH_REPLICAS, "0-1") .build(); awarenessReplicaBalance = new AwarenessReplicaBalance(settings, EMPTY_CLUSTER_SETTINGS); @@ -97,6 +111,16 @@ public void testForcedAwarenessAttribute() { Optional.of("expected max cap on auto expand to be a multiple of total awareness attributes [3]") ); + autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get(settings); + assertEquals( + awarenessReplicaBalance.validate(1, autoExpandSearchReplicas), + Optional.of("expected max cap on auto expand search replicas to be a multiple of total awareness attributes [3]") + ); + assertEquals( + awarenessReplicaBalance.validate(2, autoExpandSearchReplicas), + Optional.of("expected max cap on auto expand search replicas to be a multiple of total awareness attributes [3]") + ); + // When auto expand replica is not present settings = Settings.builder() .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") @@ -118,14 +142,15 @@ public void testForcedAwarenessAttribute() { Optional.of("expected total copies needs to be a multiple of total awareness attributes [3]") ); - assertEquals(awarenessReplicaBalance.validate(3), Optional.empty()); - assertEquals(awarenessReplicaBalance.validate(0), Optional.empty()); + autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get(settings); + assertEquals(awarenessReplicaBalance.validate(3, autoExpandSearchReplicas), Optional.empty()); + assertEquals(awarenessReplicaBalance.validate(0, autoExpandSearchReplicas), Optional.empty()); assertEquals( - awarenessReplicaBalance.validate(2), + awarenessReplicaBalance.validate(2, autoExpandSearchReplicas), Optional.of("total search replicas needs to be a multiple of total awareness attributes [3]") ); assertEquals( - awarenessReplicaBalance.validate(1), + awarenessReplicaBalance.validate(1, autoExpandSearchReplicas), Optional.of("total search replicas needs to be a multiple of total awareness attributes [3]") ); } @@ -135,6 +160,7 @@ 
public void testForcedAwarenessAttributeDisabled() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) .put(SETTING_AUTO_EXPAND_REPLICAS, "0-1") + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, "0-1") .build(); AwarenessReplicaBalance awarenessReplicaBalance = new AwarenessReplicaBalance(settings, EMPTY_CLUSTER_SETTINGS); @@ -143,6 +169,10 @@ public void testForcedAwarenessAttributeDisabled() { assertThat(awarenessReplicaBalance.maxAwarenessAttributes(), equalTo(1)); assertEquals(awarenessReplicaBalance.validate(0, autoExpandReplica), Optional.empty()); assertEquals(awarenessReplicaBalance.validate(1, autoExpandReplica), Optional.empty()); + + AutoExpandSearchReplicas autoExpandSearchReplicas = AutoExpandSearchReplicas.SETTING.get(settings); + assertEquals(awarenessReplicaBalance.validate(0, autoExpandSearchReplicas), Optional.empty()); + assertEquals(awarenessReplicaBalance.validate(1, autoExpandSearchReplicas), Optional.empty()); } } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index e69b5984bce8d..60cddfc2972cb 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -217,6 +217,7 @@ import reactor.util.annotation.NonNull; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.opensearch.core.common.util.CollectionUtils.eagerPartition; @@ -2276,7 +2277,9 @@ protected NumShards getNumShards(String index) { assertThat(metadata.hasIndex(index), equalTo(true)); int numShards = Integer.valueOf(metadata.index(index).getSettings().get(SETTING_NUMBER_OF_SHARDS)); int numReplicas = Integer.valueOf(metadata.index(index).getSettings().get(SETTING_NUMBER_OF_REPLICAS)); - return new NumShards(numShards, numReplicas); + String numSearchReplicasValue = metadata.index(index).getSettings().get(SETTING_NUMBER_OF_SEARCH_REPLICAS); + int numSearchReplicas = numSearchReplicasValue != null ? 
Integer.parseInt(numSearchReplicasValue) : 0; + return new NumShards(numShards, numReplicas, numSearchReplicas); } /** @@ -2317,13 +2320,15 @@ public void assertSortedSegments(String indexName, Sort expectedIndexSort) { protected static class NumShards { public final int numPrimaries; public final int numReplicas; + public final int numSearchReplicas; public final int totalNumShards; public final int dataCopies; - private NumShards(int numPrimaries, int numReplicas) { + private NumShards(int numPrimaries, int numReplicas, int numSearchReplicas) { this.numPrimaries = numPrimaries; this.numReplicas = numReplicas; - this.dataCopies = numReplicas + 1; + this.numSearchReplicas = numSearchReplicas; + this.dataCopies = numReplicas + numSearchReplicas + 1; this.totalNumShards = numPrimaries * dataCopies; } } From 8312e42362e3fdb068cbe6b36edaeabc2f987f47 Mon Sep 17 00:00:00 2001 From: Kaushal Kumar Date: Wed, 2 Apr 2025 21:03:55 -0700 Subject: [PATCH 132/550] [Rule based auto tagging] Add in-memory rule processing service (#17365) * [rule based autotagging] add attribute value store Signed-off-by: Kaushal Kumar * add in-memory rule processing service Signed-off-by: Kaushal Kumar * add missing javadoc Signed-off-by: Kaushal Kumar * merge the in-memory store changes: commit d02e54475f3521be2c375c2699b4cc095768072c Author: Kaushal Kumar Date: Mon Feb 17 13:05:20 2025 -0800 add licenses directory Signed-off-by: Kaushal Kumar commit 3f98f9d1734f9414f4e449a33b120e0dc693ca7f Author: Kaushal Kumar Date: Mon Feb 17 11:52:56 2025 -0800 improve binary search bisecting expression Signed-off-by: Kaushal Kumar commit 630a3ee627ff6f356207ae41523a7c380d45e477 Author: Kaushal Kumar Date: Mon Feb 17 11:14:39 2025 -0800 improve javadoc for attribute value store Signed-off-by: Kaushal Kumar commit acdb27cc8219e62b81a1a3a220c52ca3165eb373 Author: Kaushal Kumar Date: Fri Feb 14 10:09:58 2025 -0800 add missing javadoc Signed-off-by: Kaushal Kumar commit 24c4ea6155da2eb7c3bf369bd70b032e708dccfb Author: Kaushal Kumar Date: Fri Feb 14 09:28:46 2025 -0800 run spotless apply Signed-off-by: Kaushal Kumar commit 75b6e68f750ffcb64873ddadf240138eeaddc87f Author: Kaushal Kumar Date: Fri Feb 14 09:24:32 2025 -0800 make the store interface generic Signed-off-by: Kaushal Kumar Signed-off-by: Kaushal Kumar * fix generics error Signed-off-by: Kaushal Kumar * add CHANGELOG entry Signed-off-by: Kaushal Kumar * remove stubs Signed-off-by: Kaushal Kumar * move generic logic to lib Signed-off-by: Kaushal Kumar * fix javadoc error Signed-off-by: Kaushal Kumar * fix javadoc error Signed-off-by: Kaushal Kumar * delete licenses from the wlm plugin Signed-off-by: Kaushal Kumar * expose feature level attribute value store init method Signed-off-by: Kaushal Kumar * add extra space to remove unwanted entry from the changelog diff Signed-off-by: Kaushal Kumar * address comments Signed-off-by: Kaushal Kumar * use constructors over static methods Signed-off-by: Kaushal Kumar * make member var final in InMemoryRuleProcessingService Signed-off-by: Kaushal Kumar * make concurrency checks more granular Signed-off-by: Kaushal Kumar * add concurrent test Signed-off-by: Kaushal Kumar * remove forbidden api usage Signed-off-by: Kaushal Kumar --------- Signed-off-by: Kaushal Kumar --- CHANGELOG.md | 1 + libs/autotagging-commons/build.gradle | 20 +++ .../licenses/commons-collections-LICENSE.txt | 0 .../licenses/commons-collections-NOTICE.txt | 0 .../commons-collections4-4.4.jar.sha1 | 0 .../rule/InMemoryRuleProcessingService.java | 115 +++++++++++++ 
.../AttributeExtractor.java | 29 ++++ .../attribute_extractor/package-info.java | 12 ++ .../org/opensearch/rule/package-info.java | 12 ++ .../rule/storage/AttributeValueStore.java | 2 +- .../storage/AttributeValueStoreFactory.java | 52 ++++++ .../storage/DefaultAttributeValueStore.java | 56 ++++-- .../rule/storage/package-info.java | 2 +- .../InMemoryRuleProcessingServiceTests.java | 162 ++++++++++++++++++ .../AttributeValueStoreFactoryTests.java | 38 ++++ .../storage/AttributeValueStoreTests.java | 126 ++++++++++++++ plugins/workload-management/build.gradle | 6 +- .../attribute_extractor/IndicesExtractor.java | 41 +++++ .../attribute_extractor/package-info.java | 12 ++ .../plugin/wlm/rule/package-info.java | 2 +- .../storage/AttributeValueStoreTests.java | 53 ------ 21 files changed, 661 insertions(+), 80 deletions(-) create mode 100644 libs/autotagging-commons/build.gradle rename {plugins/workload-management => libs/autotagging-commons}/licenses/commons-collections-LICENSE.txt (100%) rename {plugins/workload-management => libs/autotagging-commons}/licenses/commons-collections-NOTICE.txt (100%) rename {plugins/workload-management => libs/autotagging-commons}/licenses/commons-collections4-4.4.jar.sha1 (100%) create mode 100644 libs/autotagging-commons/src/main/java/org/opensearch/rule/InMemoryRuleProcessingService.java create mode 100644 libs/autotagging-commons/src/main/java/org/opensearch/rule/attribute_extractor/AttributeExtractor.java create mode 100644 libs/autotagging-commons/src/main/java/org/opensearch/rule/attribute_extractor/package-info.java create mode 100644 libs/autotagging-commons/src/main/java/org/opensearch/rule/package-info.java rename {plugins/workload-management/src/main/java/org/opensearch/plugin/wlm => libs/autotagging-commons/src/main/java/org/opensearch}/rule/storage/AttributeValueStore.java (95%) create mode 100644 libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/AttributeValueStoreFactory.java rename {plugins/workload-management/src/main/java/org/opensearch/plugin/wlm => libs/autotagging-commons/src/main/java/org/opensearch}/rule/storage/DefaultAttributeValueStore.java (55%) rename {plugins/workload-management/src/main/java/org/opensearch/plugin/wlm => libs/autotagging-commons/src/main/java/org/opensearch}/rule/storage/package-info.java (86%) create mode 100644 libs/autotagging-commons/src/test/java/org/opensearch/rule/InMemoryRuleProcessingServiceTests.java create mode 100644 libs/autotagging-commons/src/test/java/org/opensearch/rule/storage/AttributeValueStoreFactoryTests.java create mode 100644 libs/autotagging-commons/src/test/java/org/opensearch/rule/storage/AttributeValueStoreTests.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/attribute_extractor/IndicesExtractor.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/attribute_extractor/package-info.java delete mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 53cb18c04dde1..e76ebb48d89d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) - Added Kinesis support as a plugin for the pull-based ingestion 
([#17615](https://github.com/opensearch-project/OpenSearch/pull/17615)) - Add FilterFieldType for developers who want to wrap MappedFieldType ([#17627](https://github.com/opensearch-project/OpenSearch/pull/17627)) +- [Rule Based Auto-tagging] Add in-memory rule processing service ([#17365](https://github.com/opensearch-project/OpenSearch/pull/17365)) - [Security Manager Replacement] Create initial Java Agent to intercept Socket::connect calls ([#17724](https://github.com/opensearch-project/OpenSearch/pull/17724)) - Add ingestion management APIs for pause, resume and get ingestion state ([#17631](https://github.com/opensearch-project/OpenSearch/pull/17631)) - [Security Manager Replacement] Enhance Java Agent to intercept System::exit ([#17746](https://github.com/opensearch-project/OpenSearch/pull/17746)) diff --git a/libs/autotagging-commons/build.gradle b/libs/autotagging-commons/build.gradle new file mode 100644 index 0000000000000..cf3a75440c299 --- /dev/null +++ b/libs/autotagging-commons/build.gradle @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +dependencies { + api 'org.apache.commons:commons-collections4:4.4' + api project(":server") + + testImplementation(project(":test:framework")) { + exclude group: 'org.opensearch', module: 'opensearch-core' + } +} + +tasks.named("dependencyLicenses").configure { + mapping from: /commons-collections.*/, to: 'commons-collections' +} diff --git a/plugins/workload-management/licenses/commons-collections-LICENSE.txt b/libs/autotagging-commons/licenses/commons-collections-LICENSE.txt similarity index 100% rename from plugins/workload-management/licenses/commons-collections-LICENSE.txt rename to libs/autotagging-commons/licenses/commons-collections-LICENSE.txt diff --git a/plugins/workload-management/licenses/commons-collections-NOTICE.txt b/libs/autotagging-commons/licenses/commons-collections-NOTICE.txt similarity index 100% rename from plugins/workload-management/licenses/commons-collections-NOTICE.txt rename to libs/autotagging-commons/licenses/commons-collections-NOTICE.txt diff --git a/plugins/workload-management/licenses/commons-collections4-4.4.jar.sha1 b/libs/autotagging-commons/licenses/commons-collections4-4.4.jar.sha1 similarity index 100% rename from plugins/workload-management/licenses/commons-collections4-4.4.jar.sha1 rename to libs/autotagging-commons/licenses/commons-collections4-4.4.jar.sha1 diff --git a/libs/autotagging-commons/src/main/java/org/opensearch/rule/InMemoryRuleProcessingService.java b/libs/autotagging-commons/src/main/java/org/opensearch/rule/InMemoryRuleProcessingService.java new file mode 100644 index 0000000000000..219f6fa5e1999 --- /dev/null +++ b/libs/autotagging-commons/src/main/java/org/opensearch/rule/InMemoryRuleProcessingService.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rule; + +import org.opensearch.autotagging.Attribute; +import org.opensearch.autotagging.FeatureType; +import org.opensearch.autotagging.Rule; +import org.opensearch.rule.attribute_extractor.AttributeExtractor; +import org.opensearch.rule.storage.AttributeValueStore; +import org.opensearch.rule.storage.AttributeValueStoreFactory; + +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Supplier; + +/** + * This class is responsible for managing the in-memory view of Rules and finding the matching Rule for a request. + * Each auto-tagging feature should use a separate instance of this class, as this avoids potential concurrency overhead + * in case of dynamic updates and attribute sharing scenarios. + */ +public class InMemoryRuleProcessingService { + + private final AttributeValueStoreFactory attributeValueStoreFactory; + + /** + * Constructor + * @param featureType the feature which is using rule based auto-tagging + * @param attributeValueStoreSupplier supplies the feature level AttributeValueStore instance + */ + public InMemoryRuleProcessingService( + FeatureType featureType, + Supplier> attributeValueStoreSupplier + ) { + attributeValueStoreFactory = new AttributeValueStoreFactory(featureType, attributeValueStoreSupplier); + } + + /** + * Adds the rule to the in-memory view + * @param rule to be added + */ + public void add(final Rule rule) { + perform(rule, this::addOperation); + } + + /** + * Removes the rule from the in-memory view + * @param rule to be removed + */ + public void remove(final Rule rule) { + perform(rule, this::removeOperation); + } + + private void perform(Rule rule, BiConsumer>, Rule> ruleOperation) { + for (Map.Entry> attributeEntry : rule.getAttributeMap().entrySet()) { + ruleOperation.accept(attributeEntry, rule); + } + } + + private void removeOperation(Map.Entry> attributeEntry, Rule rule) { + AttributeValueStore valueStore = attributeValueStoreFactory.getAttributeValueStore(attributeEntry.getKey()); + for (String value : attributeEntry.getValue()) { + valueStore.remove(value); + } + } + + private void addOperation(Map.Entry> attributeEntry, Rule rule) { + AttributeValueStore valueStore = attributeValueStoreFactory.getAttributeValueStore(attributeEntry.getKey()); + for (String value : attributeEntry.getValue()) { + valueStore.put(value, rule.getFeatureValue()); + } + } + + /** + * Evaluates the label for the current request.
It finds the matches for each attribute value and then it is an + * intersection of all the matches + * @param attributeExtractors list of extractors which are used to get the attribute values to find the + * matching rule + * @return a label if there is unique label otherwise empty + */ + public Optional evaluateLabel(List> attributeExtractors) { + assert attributeValueStoreFactory != null; + Optional result = Optional.empty(); + for (AttributeExtractor attributeExtractor : attributeExtractors) { + AttributeValueStore valueStore = attributeValueStoreFactory.getAttributeValueStore( + attributeExtractor.getAttribute() + ); + for (String value : attributeExtractor.extract()) { + Optional possibleMatch = valueStore.get(value); + + if (possibleMatch.isEmpty()) { + return Optional.empty(); + } + + if (result.isEmpty()) { + result = possibleMatch; + } else { + boolean isThePossibleMatchEqualResult = possibleMatch.get().equals(result.get()); + if (!isThePossibleMatchEqualResult) { + return Optional.empty(); + } + } + } + } + return result; + } +} diff --git a/libs/autotagging-commons/src/main/java/org/opensearch/rule/attribute_extractor/AttributeExtractor.java b/libs/autotagging-commons/src/main/java/org/opensearch/rule/attribute_extractor/AttributeExtractor.java new file mode 100644 index 0000000000000..3e13ea54fad34 --- /dev/null +++ b/libs/autotagging-commons/src/main/java/org/opensearch/rule/attribute_extractor/AttributeExtractor.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rule.attribute_extractor; + +import org.opensearch.autotagging.Attribute; + +/** + * This interface defines the contract for extracting the attributes for Rule based auto-tagging feature + * @param + */ +public interface AttributeExtractor { + /** + * This method returns the Attribute which it is responsible for extracting + * @return attribute + */ + Attribute getAttribute(); + + /** + * This method returns the attribute values in context of the current request + * @return attribute value + */ + Iterable extract(); +} diff --git a/libs/autotagging-commons/src/main/java/org/opensearch/rule/attribute_extractor/package-info.java b/libs/autotagging-commons/src/main/java/org/opensearch/rule/attribute_extractor/package-info.java new file mode 100644 index 0000000000000..19a06fcba8f59 --- /dev/null +++ b/libs/autotagging-commons/src/main/java/org/opensearch/rule/attribute_extractor/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains feature attribute extractor interface and its implementations + */ +package org.opensearch.rule.attribute_extractor; diff --git a/libs/autotagging-commons/src/main/java/org/opensearch/rule/package-info.java b/libs/autotagging-commons/src/main/java/org/opensearch/rule/package-info.java new file mode 100644 index 0000000000000..12341deae29e7 --- /dev/null +++ b/libs/autotagging-commons/src/main/java/org/opensearch/rule/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Rule based auto-tagging generic constructs + */ +package org.opensearch.rule; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java b/libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/AttributeValueStore.java similarity index 95% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java rename to libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/AttributeValueStore.java index eb2ce8e4764ea..98e9cc4041318 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStore.java +++ b/libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/AttributeValueStore.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.plugin.wlm.rule.storage; +package org.opensearch.rule.storage; import java.util.Optional; diff --git a/libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/AttributeValueStoreFactory.java b/libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/AttributeValueStoreFactory.java new file mode 100644 index 0000000000000..8cda4bd26fdf0 --- /dev/null +++ b/libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/AttributeValueStoreFactory.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rule.storage; + +import org.opensearch.autotagging.Attribute; +import org.opensearch.autotagging.FeatureType; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.Supplier; + +/** + * Factory class for AttributeValueStore per feature type as two feature types can potentially share same attribute + */ +public class AttributeValueStoreFactory { + private final Map> attributeValueStores = new HashMap<>(); + + /** + * Constructor + * @param featureType is the feature which are using rule based auto tagging + * @param attributeValueStoreSupplier supplies the feature level AttributeValueStore instance + */ + public AttributeValueStoreFactory(FeatureType featureType, Supplier> attributeValueStoreSupplier) { + for (Attribute attribute : featureType.getAllowedAttributesRegistry().values()) { + attributeValueStores.put(attribute.getName(), attributeValueStoreSupplier.get()); + } + } + + /** + * Factory method which returns the {@link AttributeValueStore} for the given attribute + * @param attribute + * @return + */ + public AttributeValueStore getAttributeValueStore(final Attribute attribute) { + final String attributeName = attribute.getName(); + if (attributeValueStores == null) { + throw new IllegalStateException("AttributeValueStoreFactory is not initialized yet."); + } + + if (!attributeValueStores.containsKey(attributeName)) { + throw new IllegalArgumentException("[" + attributeName + "] is not a valid attribute for enabled features."); + } + + return attributeValueStores.get(attributeName); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java b/libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/DefaultAttributeValueStore.java similarity index 55% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java rename to 
libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/DefaultAttributeValueStore.java index 8b4c063f7ad1a..9f0584b276d11 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/DefaultAttributeValueStore.java +++ b/libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/DefaultAttributeValueStore.java @@ -6,12 +6,13 @@ * compatible open source license. */ -package org.opensearch.plugin.wlm.rule.storage; +package org.opensearch.rule.storage; import org.apache.commons.collections4.trie.PatriciaTrie; import java.util.Map; import java.util.Optional; +import java.util.concurrent.locks.ReentrantReadWriteLock; /** * This is a patricia trie based implementation of AttributeValueStore @@ -20,7 +21,10 @@ * ref: https://commons.apache.org/proper/commons-collections/javadocs/api-4.4/org/apache/commons/collections4/trie/PatriciaTrie.html */ public class DefaultAttributeValueStore implements AttributeValueStore { - PatriciaTrie trie; + private final PatriciaTrie trie; + private static final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + private static final ReentrantReadWriteLock.ReadLock readLock = lock.readLock(); + private static final ReentrantReadWriteLock.WriteLock writeLock = lock.writeLock(); /** * Default constructor @@ -39,34 +43,48 @@ public DefaultAttributeValueStore(PatriciaTrie trie) { @Override public void put(K key, V value) { - trie.put(key, value); + writeLock.lock(); + try { + trie.put(key, value); + } finally { + writeLock.unlock(); + } } @Override public void remove(String key) { - trie.remove(key); + writeLock.lock(); + try { + trie.remove(key); + } finally { + writeLock.unlock(); + } } @Override public Optional get(String key) { - /** - * Since we are inserting prefixes into the trie and searching for larger strings - * It is important to find the largest matching prefix key in the trie efficiently - * Hence we can do binary search - */ - final String longestMatchingPrefix = findLongestMatchingPrefix(key); + readLock.lock(); + try { + /** + * Since we are inserting prefixes into the trie and searching for larger strings + * It is important to find the largest matching prefix key in the trie efficiently + * Hence we can do binary search + */ + final String longestMatchingPrefix = findLongestMatchingPrefix(key); - /** - * Now there are following cases for this prefix - * 1. There is a Rule which has this prefix as one of the attribute values. In this case we should return the - * Rule's label otherwise send empty - */ - for (Map.Entry possibleMatch : trie.prefixMap(longestMatchingPrefix).entrySet()) { - if (key.startsWith(possibleMatch.getKey())) { - return Optional.of(possibleMatch.getValue()); + /** + * Now there are following cases for this prefix + * 1. There is a Rule which has this prefix as one of the attribute values. 
In this case we should return the + * Rule's label otherwise send empty + */ + for (Map.Entry possibleMatch : trie.prefixMap(longestMatchingPrefix).entrySet()) { + if (key.startsWith(possibleMatch.getKey())) { + return Optional.of(possibleMatch.getValue()); + } } + } finally { + readLock.unlock(); } - return Optional.empty(); } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java b/libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/package-info.java similarity index 86% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java rename to libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/package-info.java index 6aa721ce22a00..b7aeb8d6756ab 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/storage/package-info.java +++ b/libs/autotagging-commons/src/main/java/org/opensearch/rule/storage/package-info.java @@ -9,4 +9,4 @@ /** * This package contains interfaces and implementations for in memory rule storage mechanisms */ -package org.opensearch.plugin.wlm.rule.storage; +package org.opensearch.rule.storage; diff --git a/libs/autotagging-commons/src/test/java/org/opensearch/rule/InMemoryRuleProcessingServiceTests.java b/libs/autotagging-commons/src/test/java/org/opensearch/rule/InMemoryRuleProcessingServiceTests.java new file mode 100644 index 0000000000000..d12900a79b121 --- /dev/null +++ b/libs/autotagging-commons/src/test/java/org/opensearch/rule/InMemoryRuleProcessingServiceTests.java @@ -0,0 +1,162 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rule; + +import org.opensearch.autotagging.Attribute; +import org.opensearch.autotagging.FeatureType; +import org.opensearch.autotagging.Rule; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.rule.attribute_extractor.AttributeExtractor; +import org.opensearch.rule.storage.DefaultAttributeValueStore; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +public class InMemoryRuleProcessingServiceTests extends OpenSearchTestCase { + InMemoryRuleProcessingService sut; + + public void setUp() throws Exception { + super.setUp(); + sut = new InMemoryRuleProcessingService(WLMFeatureType.WLM, DefaultAttributeValueStore::new); + } + + public void testAdd() { + sut.add(getRule(Set.of("test", "change"), "test_id")); + + List> extractors = getAttributeExtractors(List.of("test")); + Optional label = sut.evaluateLabel(extractors); + assertTrue(label.isPresent()); + assertEquals("test_id", label.get()); + } + + public void testRemove() { + Rule rule = getRule(Set.of("test", "change"), "test_id"); + sut.add(rule); + sut.remove(rule); + + List> extractors = getAttributeExtractors(List.of("test")); + Optional label = sut.evaluateLabel(extractors); + assertFalse(label.isPresent()); + } + + public void testEvaluateLabelForExactMatch() { + sut.add(getRule(Set.of("test1", "change"), "test_id")); + sut.add(getRule(Set.of("test", "double"), "test_id1")); + + List> extractors = getAttributeExtractors(List.of("test")); + Optional label = sut.evaluateLabel(extractors); + assertTrue(label.isPresent()); + assertEquals("test_id1", label.get()); + } + + public void testEvaluateLabelForMultiMatch() { + sut.add(getRule(Set.of("key1", "change"), "test_id")); + sut.add(getRule(Set.of("key2", "double"), "new_id")); + + List> extractors = getAttributeExtractors(List.of("key1", "key2")); + Optional label = sut.evaluateLabel(extractors); + assertFalse(label.isPresent()); + } + + public void testEvaluateLabelForNoMatch() { + sut.add(getRule(Set.of("test1", "change"), "test_id")); + sut.add(getRule(Set.of("test", "double"), "test_id")); + + List> extractors = getAttributeExtractors(List.of("dummy_test")); + Optional label = sut.evaluateLabel(extractors); + assertFalse(label.isPresent()); + } + + public void testEvaluateLabelForExactMatchWithLongestMatchingPrefixCase() { + sut.add(getRule(Set.of("test1", "change"), "test_id")); + sut.add(getRule(Set.of("test", "double"), "test_id1")); + + List> extractors = getAttributeExtractors(List.of("testing")); + Optional label = sut.evaluateLabel(extractors); + assertTrue(label.isPresent()); + assertEquals("test_id1", label.get()); + } + + public void testEvaluateLabelForNoMatchWithLongestMatchingPrefixCase() { + sut.add(getRule(Set.of("key1", "change"), "test_id")); + sut.add(getRule(Set.of("key12", "double"), "test_id1")); + + List> extractors = getAttributeExtractors(List.of("key")); + Optional label = sut.evaluateLabel(extractors); + assertFalse(label.isPresent()); + } + + private static Rule getRule(Set attributeValues, String label) { + return new Rule( + "test description", + Map.of(TestAttribute.TEST_ATTRIBUTE, attributeValues), + WLMFeatureType.WLM, + label, + "2025-02-24T07:42:10.123456Z" + ); + } + + private static List> getAttributeExtractors(List extractedAttributes) { + List> extractors = List.of(new AttributeExtractor() { + @Override + public Attribute getAttribute() { + return 
TestAttribute.TEST_ATTRIBUTE; + } + + @Override + public Iterable extract() { + return extractedAttributes; + } + }); + return extractors; + } + + public enum WLMFeatureType implements FeatureType { + WLM; + + @Override + public String getName() { + return ""; + } + + @Override + public Map getAllowedAttributesRegistry() { + return Map.of("test_attribute", TestAttribute.TEST_ATTRIBUTE); + } + + @Override + public void registerFeatureType() {} + } + + public enum TestAttribute implements Attribute { + TEST_ATTRIBUTE("test_attribute"), + INVALID_ATTRIBUTE("invalid_attribute"); + + private final String name; + + TestAttribute(String name) { + this.name = name; + } + + @Override + public String getName() { + return name; + } + + @Override + public void validateAttribute() {} + + @Override + public void writeTo(StreamOutput out) throws IOException {} + } +} diff --git a/libs/autotagging-commons/src/test/java/org/opensearch/rule/storage/AttributeValueStoreFactoryTests.java b/libs/autotagging-commons/src/test/java/org/opensearch/rule/storage/AttributeValueStoreFactoryTests.java new file mode 100644 index 0000000000000..5cdc128c50620 --- /dev/null +++ b/libs/autotagging-commons/src/test/java/org/opensearch/rule/storage/AttributeValueStoreFactoryTests.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rule.storage; + +import org.opensearch.autotagging.Attribute; +import org.opensearch.rule.InMemoryRuleProcessingServiceTests.TestAttribute; +import org.opensearch.rule.InMemoryRuleProcessingServiceTests.WLMFeatureType; +import org.opensearch.test.OpenSearchTestCase; + +public class AttributeValueStoreFactoryTests extends OpenSearchTestCase { + AttributeValueStoreFactory sut; + + @Override + public void setUp() throws Exception { + super.setUp(); + sut = new AttributeValueStoreFactory(WLMFeatureType.WLM, DefaultAttributeValueStore::new); + } + + public void testFeatureLevelStoreInitialisation() { + for (Attribute attribute : WLMFeatureType.WLM.getAllowedAttributesRegistry().values()) { + assertTrue(sut.getAttributeValueStore(attribute) instanceof DefaultAttributeValueStore); + } + } + + public void testValidGetAttributeValueStore() { + assertTrue(sut.getAttributeValueStore(TestAttribute.TEST_ATTRIBUTE) instanceof DefaultAttributeValueStore); + } + + public void testInValidGetAttributeValueStore() { + assertThrows(IllegalArgumentException.class, () -> { sut.getAttributeValueStore(TestAttribute.INVALID_ATTRIBUTE); }); + } +} diff --git a/libs/autotagging-commons/src/test/java/org/opensearch/rule/storage/AttributeValueStoreTests.java b/libs/autotagging-commons/src/test/java/org/opensearch/rule/storage/AttributeValueStoreTests.java new file mode 100644 index 0000000000000..2340cc3327337 --- /dev/null +++ b/libs/autotagging-commons/src/test/java/org/opensearch/rule/storage/AttributeValueStoreTests.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rule.storage; + +import org.apache.commons.collections4.trie.PatriciaTrie; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.List; + +public class AttributeValueStoreTests extends OpenSearchTestCase { + + AttributeValueStore subjectUnderTest; + final static String ALPHA_NUMERIC = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + + public void setUp() throws Exception { + super.setUp(); + subjectUnderTest = new DefaultAttributeValueStore<>(new PatriciaTrie<>()); + } + + public void testPut() { + subjectUnderTest.put("foo", "bar"); + assertEquals("bar", subjectUnderTest.get("foo").get()); + } + + public void testRemove() { + subjectUnderTest.put("foo", "bar"); + subjectUnderTest.remove("foo"); + assertEquals(0, subjectUnderTest.size()); + } + + public void tesGet() { + subjectUnderTest.put("foo", "bar"); + assertEquals("bar", subjectUnderTest.get("foo").get()); + } + + public void testGetWhenNoProperPrefixIsPresent() { + subjectUnderTest.put("foo", "bar"); + subjectUnderTest.put("foodip", "sing"); + assertTrue(subjectUnderTest.get("foxtail").isEmpty()); + subjectUnderTest.put("fox", "lucy"); + + assertFalse(subjectUnderTest.get("foxtail").isEmpty()); + } + + public void testClear() { + subjectUnderTest.put("foo", "bar"); + subjectUnderTest.clear(); + assertEquals(0, subjectUnderTest.size()); + } + + public void testConcurrentUpdatesAndReads() { + final List randomStrings = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + randomStrings.add(generateRandom(20)); + } + List readerThreads = new ArrayList<>(); + List writerThreads = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + readerThreads.add(new AttributeValueStoreReader(subjectUnderTest, randomStrings)); + writerThreads.add(new AttributeValueStoreWriter(subjectUnderTest, randomStrings)); + } + + for (int ii = 0; ii < 10; ii++) { + readerThreads.get(ii).start(); + writerThreads.get(ii).start(); + } + } + + public static String generateRandom(int maxLength) { + int length = random().nextInt(maxLength) + 1; // +1 to avoid length 0 + StringBuilder sb = new StringBuilder(length); + for (int i = 0; i < length; i++) { + sb.append(ALPHA_NUMERIC.charAt(random().nextInt(ALPHA_NUMERIC.length()))); + } + return sb.toString(); + } + + private static class AttributeValueStoreReader extends Thread { + private final AttributeValueStore subjectUnderTest; + private final List toReadKeys; + + public AttributeValueStoreReader(AttributeValueStore subjectUnderTest, List toReadKeys) { + super(); + this.subjectUnderTest = subjectUnderTest; + this.toReadKeys = toReadKeys; + } + + @Override + public void run() { + try { + Thread.sleep(random().nextInt(100)); + for (String key : toReadKeys) { + subjectUnderTest.get(key); + } + } catch (InterruptedException e) {} + } + } + + private static class AttributeValueStoreWriter extends Thread { + private final AttributeValueStore subjectUnderTest; + private final List toWriteKeys; + + public AttributeValueStoreWriter(AttributeValueStore subjectUnderTest, List toWriteKeys) { + super(); + this.subjectUnderTest = subjectUnderTest; + this.toWriteKeys = toWriteKeys; + } + + @Override + public void run() { + try { + Thread.sleep(random().nextInt(100)); + for (String key : toWriteKeys) { + subjectUnderTest.put(key, key); + } + } catch (InterruptedException e) {} + } + } +} diff --git a/plugins/workload-management/build.gradle b/plugins/workload-management/build.gradle index c73c63e84ed1f..5396a74361b77 100644 --- 
a/plugins/workload-management/build.gradle +++ b/plugins/workload-management/build.gradle @@ -19,9 +19,5 @@ opensearchplugin { } dependencies { - api 'org.apache.commons:commons-collections4:4.4' -} - -tasks.named("dependencyLicenses").configure { - mapping from: /commons-collections.*/, to: 'commons-collections' + api project(":libs:opensearch-autotagging-commons") } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/attribute_extractor/IndicesExtractor.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/attribute_extractor/IndicesExtractor.java new file mode 100644 index 0000000000000..a3230ac919eb1 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/attribute_extractor/IndicesExtractor.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.rule.attribute_extractor; + +import org.opensearch.action.IndicesRequest; +import org.opensearch.autotagging.Attribute; +import org.opensearch.rule.attribute_extractor.AttributeExtractor; + +import java.util.List; + +/** + * This class extracts the indices from a request + */ +public class IndicesExtractor implements AttributeExtractor { + private final IndicesRequest indicesRequest; + + /** + * Default constructor + * @param indicesRequest + */ + public IndicesExtractor(IndicesRequest indicesRequest) { + this.indicesRequest = indicesRequest; + } + + @Override + public Attribute getAttribute() { + // TODO: this will be replaced by WLM defined index_pattern attribute + return null; + } + + @Override + public Iterable extract() { + return List.of(indicesRequest.indices()); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/attribute_extractor/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/attribute_extractor/package-info.java new file mode 100644 index 0000000000000..ca1cc902b6ca1 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/attribute_extractor/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains feature attribute extractor interface and its implementations + */ +package org.opensearch.plugin.wlm.rule.attribute_extractor; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java index 85c0562dae5ee..d6a196cca4672 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/package-info.java @@ -6,8 +6,8 @@ * compatible open source license. 
*/ -package org.opensearch.plugin.wlm.rule; /** * This package holds constructs for the Rule's in-memory storage, processing and syncing the in-memory view * with the index view */ +package org.opensearch.plugin.wlm.rule; diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java deleted file mode 100644 index 29c42e51efeb0..0000000000000 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/storage/AttributeValueStoreTests.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.wlm.rule.storage; - -import org.apache.commons.collections4.trie.PatriciaTrie; -import org.opensearch.test.OpenSearchTestCase; - -public class AttributeValueStoreTests extends OpenSearchTestCase { - - AttributeValueStore subjectUnderTest; - - public void setUp() throws Exception { - super.setUp(); - subjectUnderTest = new DefaultAttributeValueStore<>(new PatriciaTrie<>()); - } - - public void testPut() { - subjectUnderTest.put("foo", "bar"); - assertEquals("bar", subjectUnderTest.get("foo").get()); - } - - public void testRemove() { - subjectUnderTest.put("foo", "bar"); - subjectUnderTest.remove("foo"); - assertEquals(0, subjectUnderTest.size()); - } - - public void tesGet() { - subjectUnderTest.put("foo", "bar"); - assertEquals("bar", subjectUnderTest.get("foo").get()); - } - - public void testGetWhenNoProperPrefixIsPresent() { - subjectUnderTest.put("foo", "bar"); - subjectUnderTest.put("foodip", "sing"); - assertTrue(subjectUnderTest.get("foxtail").isEmpty()); - subjectUnderTest.put("fox", "lucy"); - - assertFalse(subjectUnderTest.get("foxtail").isEmpty()); - } - - public void testClear() { - subjectUnderTest.put("foo", "bar"); - subjectUnderTest.clear(); - assertEquals(0, subjectUnderTest.size()); - } -} From b5195f453ebb66082e61b15adbe9e7aaf2bcaa99 Mon Sep 17 00:00:00 2001 From: "Samuel.G" <1148690954@qq.com> Date: Thu, 3 Apr 2025 21:00:48 +0900 Subject: [PATCH 133/550] Fix weight calculation of decideRebalance when explain (#17720) Signed-off-by: gesong.samuel --- .../routing/allocation/allocator/LocalShardsBalancer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 89fa728438062..ba61ea5f8cd47 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -233,7 +233,7 @@ MoveDecision decideRebalance(final ShardRouting shard) { // balance the shard, if a better node can be found final String idxName = shard.getIndexName(); - final float currentWeight = weight.weight(this, currentNode, idxName); + final float currentWeight = weight.weightWithRebalanceConstraints(this, currentNode, idxName); final AllocationDeciders deciders = allocation.deciders(); Decision.Type rebalanceDecisionType = Decision.Type.NO; BalancedShardsAllocator.ModelNode assignedNode = null; From 5f1b4ab6199f64754898d49251ce16a70ecd5e66 Mon Sep 17 
00:00:00 2001 From: Ashish Singh Date: Thu, 3 Apr 2025 17:43:14 +0530 Subject: [PATCH 134/550] Implement fixed interval refresh task scheduling (#17777) Signed-off-by: Ashish Singh --- CHANGELOG.md | 1 + .../common/settings/ClusterSettings.java | 5 +- .../util/concurrent/AbstractAsyncTask.java | 50 ++++++++- .../org/opensearch/index/IndexModule.java | 4 + .../org/opensearch/index/IndexService.java | 13 ++- .../opensearch/indices/IndicesService.java | 30 ++++++ .../opensearch/index/IndexModuleTests.java | 1 + .../opensearch/index/IndexServiceTests.java | 100 ++++++++++++++++++ 8 files changed, 200 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e76ebb48d89d9..6469382b5a18e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add ingestion management APIs for pause, resume and get ingestion state ([#17631](https://github.com/opensearch-project/OpenSearch/pull/17631)) - [Security Manager Replacement] Enhance Java Agent to intercept System::exit ([#17746](https://github.com/opensearch-project/OpenSearch/pull/17746)) - Support AutoExpand for SearchReplica ([#17741](https://github.com/opensearch-project/OpenSearch/pull/17741)) +- Implement fixed interval refresh task scheduling ([#17777](https://github.com/opensearch-project/OpenSearch/pull/17777)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 456357005d0ab..84b50041b7c91 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -820,7 +820,10 @@ public void apply(Settings value, Settings current, Settings previous) { ), OpenSearchOnHeapCacheSettings.EXPIRE_AFTER_ACCESS_SETTING.getConcreteSettingForNamespace( CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() - ) + ), + + // Setting related to refresh optimisations + IndicesService.CLUSTER_REFRESH_FIXED_INTERVAL_SCHEDULE_ENABLED_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/AbstractAsyncTask.java b/server/src/main/java/org/opensearch/common/util/concurrent/AbstractAsyncTask.java index 7c599476e263d..10138a9469b38 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/AbstractAsyncTask.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/AbstractAsyncTask.java @@ -33,6 +33,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.Randomness; import org.opensearch.common.unit.TimeValue; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -40,6 +41,7 @@ import java.io.Closeable; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; /** * A base class for tasks that need to repeat. 
@@ -56,17 +58,31 @@ public abstract class AbstractAsyncTask implements Runnable, Closeable { private volatile boolean isScheduledOrRunning; private volatile Exception lastThrownException; private volatile TimeValue interval; + private volatile long lastRunStartTimeNs = -1; + private final Supplier fixedIntervalSchedulingEnabled; protected AbstractAsyncTask(Logger logger, ThreadPool threadPool, TimeValue interval, boolean autoReschedule) { + this(logger, threadPool, interval, autoReschedule, () -> Boolean.FALSE); + } + + protected AbstractAsyncTask( + Logger logger, + ThreadPool threadPool, + TimeValue interval, + boolean autoReschedule, + Supplier fixedIntervalSchedulingEnabled + ) { this.logger = logger; this.threadPool = threadPool; this.interval = interval; this.autoReschedule = autoReschedule; + this.fixedIntervalSchedulingEnabled = fixedIntervalSchedulingEnabled; } /** * Change the interval between runs. * If a future run is scheduled then this will reschedule it. + * * @param interval The new interval between runs. */ public synchronized void setInterval(TimeValue interval) { @@ -85,6 +101,7 @@ public TimeValue getInterval() { * should be scheduled. This method does *not* need to test if * the task is closed, as being closed automatically prevents * scheduling. + * * @return Should the task be scheduled to run? */ protected abstract boolean mustReschedule(); @@ -106,7 +123,7 @@ public synchronized void rescheduleIfNecessary() { if (logger.isTraceEnabled()) { logger.trace("scheduling {} every {}", toString(), interval); } - cancellable = threadPool.schedule(this, interval, getThreadPool()); + cancellable = threadPool.schedule(this, getSleepDuration(), getThreadPool()); isScheduledOrRunning = true; } else { logger.trace("scheduled {} disabled", toString()); @@ -156,6 +173,7 @@ public final void run() { isScheduledOrRunning = autoReschedule; } try { + lastRunStartTimeNs = System.nanoTime(); runInternal(); } catch (Exception ex) { if (lastThrownException == null || sameException(lastThrownException, ex) == false) { @@ -203,4 +221,34 @@ private static boolean sameException(Exception left, Exception right) { protected String getThreadPool() { return ThreadPool.Names.SAME; } + + /** + * Calculates the sleep duration for the next scheduled execution of the task. + * This method determines the appropriate delay based on the last run time and the configured interval + * to schedule the next execution. + */ + public TimeValue getSleepDuration() { + if (!fixedIntervalSchedulingEnabled.get()) { + return interval; + } + + if (lastRunStartTimeNs == -1) { + // We want to stagger the start of refreshes in a random manner so that we avoid refreshes happening at the same time + // when we have refreshes happening in parallel for multiple shards of the same index. a.k.a. Dense shard packing + long sleepTimeNs = Randomness.get().nextLong(interval.nanos()); + return TimeValue.timeValueNanos(sleepTimeNs); + } + + long timeSinceLastRunNs = System.nanoTime() - lastRunStartTimeNs; + if (timeSinceLastRunNs >= interval.nanos()) { + // If the time taken for refresh is more than the configured refresh interval, then we schedule the next refresh + // immediately. This is to avoid the case where the time taken for refresh is more than the configured refresh + // interval due to the processing of the refresh request. + return TimeValue.ZERO; + } else { + // If the time taken for refresh is less than the configured refresh interval, then we schedule the next refresh + // after the remaining time for the refresh interval.
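+ // For example, with a 1s refresh interval: a run that started 400ms ago sleeps for the remaining 600ms, + // while a run that started 1.2s or more ago takes the branch above and is rescheduled immediately (TimeValue.ZERO).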
+ return TimeValue.timeValueNanos(interval.nanos() - timeSinceLastRunNs); + } + } } diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 961b77ac20c5e..3dfcad48fb9d5 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -630,6 +630,7 @@ public IndexService newIndexService( IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, BiFunction translogFactorySupplier, Supplier clusterDefaultRefreshIntervalSupplier, + Supplier fixedRefreshIntervalSchedulingEnabled, RecoverySettings recoverySettings, RemoteStoreSettings remoteStoreSettings ) throws IOException { @@ -653,6 +654,7 @@ public IndexService newIndexService( remoteDirectoryFactory, translogFactorySupplier, clusterDefaultRefreshIntervalSupplier, + fixedRefreshIntervalSchedulingEnabled, recoverySettings, remoteStoreSettings, (s) -> {}, @@ -680,6 +682,7 @@ public IndexService newIndexService( IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, BiFunction translogFactorySupplier, Supplier clusterDefaultRefreshIntervalSupplier, + Supplier fixedRefreshIntervalSchedulingEnabled, RecoverySettings recoverySettings, RemoteStoreSettings remoteStoreSettings, Consumer replicator, @@ -741,6 +744,7 @@ public IndexService newIndexService( recoveryStateFactory, translogFactorySupplier, clusterDefaultRefreshIntervalSupplier, + fixedRefreshIntervalSchedulingEnabled, recoverySettings, remoteStoreSettings, fileCache, diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 0696058e86f08..de269f1676f1c 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -192,6 +192,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final ValuesSourceRegistry valuesSourceRegistry; private final BiFunction translogFactorySupplier; private final Supplier clusterDefaultRefreshIntervalSupplier; + private final Supplier fixedRefreshIntervalSchedulingEnabled; private final RecoverySettings recoverySettings; private final RemoteStoreSettings remoteStoreSettings; private final FileCache fileCache; @@ -232,6 +233,7 @@ public IndexService( IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, BiFunction translogFactorySupplier, Supplier clusterDefaultRefreshIntervalSupplier, + Supplier fixedRefreshIntervalSchedulingEnabled, RecoverySettings recoverySettings, RemoteStoreSettings remoteStoreSettings, FileCache fileCache, @@ -307,6 +309,7 @@ public IndexService( this.searchOperationListeners = Collections.unmodifiableList(searchOperationListeners); this.indexingOperationListeners = Collections.unmodifiableList(indexingOperationListeners); this.clusterDefaultRefreshIntervalSupplier = clusterDefaultRefreshIntervalSupplier; + this.fixedRefreshIntervalSchedulingEnabled = fixedRefreshIntervalSchedulingEnabled; // kick off async ops for the first shard in this index this.refreshTask = new AsyncRefreshTask(this); this.trimTranslogTask = new AsyncTrimTranslogTask(this); @@ -361,6 +364,7 @@ public IndexService( IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, BiFunction translogFactorySupplier, Supplier clusterDefaultRefreshIntervalSupplier, + Supplier fixedRefreshIntervalSchedulingEnabled, RecoverySettings recoverySettings, RemoteStoreSettings remoteStoreSettings ) { @@ -397,6 +401,7 @@ 
public IndexService( recoveryStateFactory, translogFactorySupplier, clusterDefaultRefreshIntervalSupplier, + fixedRefreshIntervalSchedulingEnabled, recoverySettings, remoteStoreSettings, null, @@ -1316,7 +1321,11 @@ abstract static class BaseAsyncTask extends AbstractAsyncTask { protected final IndexService indexService; BaseAsyncTask(final IndexService indexService, final TimeValue interval) { - super(indexService.logger, indexService.threadPool, interval, true); + this(indexService, interval, () -> Boolean.FALSE); + } + + BaseAsyncTask(final IndexService indexService, final TimeValue interval, Supplier fixedIntervalSchedulingEnabled) { + super(indexService.logger, indexService.threadPool, interval, true, fixedIntervalSchedulingEnabled); this.indexService = indexService; rescheduleIfNecessary(); } @@ -1366,7 +1375,7 @@ public String toString() { final class AsyncRefreshTask extends BaseAsyncTask { AsyncRefreshTask(IndexService indexService) { - super(indexService, indexService.getRefreshInterval()); + super(indexService, indexService.getRefreshInterval(), fixedRefreshIntervalSchedulingEnabled); } @Override diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index f3b0121dd5c88..ec60fa61d0c99 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -290,6 +290,17 @@ public class IndicesService extends AbstractLifecycleComponent Property.Dynamic ); + /** + * This setting is used to enable fixed interval scheduling capability for refresh tasks to ensure consistent intervals + * between refreshes. + */ + public static final Setting CLUSTER_REFRESH_FIXED_INTERVAL_SCHEDULE_ENABLED_SETTING = Setting.boolSetting( + "cluster.index.refresh.fixed_interval_scheduling.enabled", + false, + Property.NodeScope, + Property.Dynamic + ); + /** * This setting is used to restrict creation or updation of index where the `index.translog.durability` index setting * is set as ASYNC if enabled. 
If disabled, any of the durability mode can be used and switched at any later time from @@ -363,6 +374,7 @@ public class IndicesService extends AbstractLifecycleComponent private final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory; private final BiFunction translogFactorySupplier; private volatile TimeValue clusterDefaultRefreshInterval; + private volatile boolean fixedRefreshIntervalSchedulingEnabled; private final SearchRequestStats searchRequestStats; private final FileCache fileCache; private final CompositeIndexSettings compositeIndexSettings; @@ -514,6 +526,15 @@ protected void closeInternal() { this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); + this.fixedRefreshIntervalSchedulingEnabled = CLUSTER_REFRESH_FIXED_INTERVAL_SCHEDULE_ENABLED_SETTING.get( + clusterService.getSettings() + ); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer( + CLUSTER_REFRESH_FIXED_INTERVAL_SCHEDULE_ENABLED_SETTING, + this::setFixedRefreshIntervalSchedulingEnabled + ); + this.recoverySettings = recoverySettings; this.remoteStoreSettings = remoteStoreSettings; this.compositeIndexSettings = compositeIndexSettings; @@ -1006,6 +1027,7 @@ private synchronized IndexService createIndexService( remoteDirectoryFactory, translogFactorySupplier, this::getClusterDefaultRefreshInterval, + this::isFixedRefreshIntervalSchedulingEnabled, this.recoverySettings, this.remoteStoreSettings, replicator, @@ -2167,4 +2189,12 @@ public CompositeIndexSettings getCompositeIndexSettings() { void setMaxSizeInRequestCache(Integer maxSizeInRequestCache) { this.maxSizeInRequestCache = maxSizeInRequestCache; } + + public void setFixedRefreshIntervalSchedulingEnabled(boolean fixedRefreshIntervalSchedulingEnabled) { + this.fixedRefreshIntervalSchedulingEnabled = fixedRefreshIntervalSchedulingEnabled; + } + + private boolean isFixedRefreshIntervalSchedulingEnabled() { + return fixedRefreshIntervalSchedulingEnabled; + } } diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 90f2b0b21cc8a..b12c1c0eca628 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -263,6 +263,7 @@ private IndexService newIndexService(IndexModule module) throws IOException { new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool, ""), translogFactorySupplier, () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, + () -> Boolean.FALSE, DefaultRecoverySettings.INSTANCE, DefaultRemoteStoreSettings.INSTANCE, s -> {}, diff --git a/server/src/test/java/org/opensearch/index/IndexServiceTests.java b/server/src/test/java/org/opensearch/index/IndexServiceTests.java index 0e62953f52e1e..b2db510477a34 100644 --- a/server/src/test/java/org/opensearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexServiceTests.java @@ -636,6 +636,106 @@ public void testReplicationTask() throws Exception { assertEquals(1000, updatedTask.getInterval().millis()); } + public void testBaseAsyncTaskWithFixedIntervalDisabled() throws Exception { + IndexService indexService = createIndex("test", Settings.EMPTY); + CountDownLatch latch = new CountDownLatch(1); + try ( + IndexService.BaseAsyncTask task = new IndexService.BaseAsyncTask( + 
indexService, + TimeValue.timeValueSeconds(5), + () -> Boolean.FALSE + ) { + @Override + protected void runInternal() { + try { + Thread.sleep(2000); + latch.countDown(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + } + } + ) { + // With refresh fixed interval disabled, the sleep duration is always the refresh interval + long sleepDuration = task.getSleepDuration().seconds(); + assertEquals(5, sleepDuration); + task.run(); + latch.await(); + sleepDuration = task.getSleepDuration().seconds(); + assertEquals(0, latch.getCount()); + indexService.close("test", false); + assertEquals(5, sleepDuration); + } + } + + public void testBaseAsyncTaskWithFixedIntervalEnabled() throws Exception { + IndexService indexService = createIndex("test", Settings.EMPTY); + CountDownLatch latch = new CountDownLatch(1); + try ( + IndexService.BaseAsyncTask task = new IndexService.BaseAsyncTask( + indexService, + TimeValue.timeValueSeconds(5), + () -> Boolean.TRUE + ) { + @Override + protected void runInternal() { + try { + Thread.sleep(2000); + latch.countDown(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + } + } + ) { + // In zero state, we have a random sleep duration + long sleepDurationMs = task.getSleepDuration().millis(); + assertTrue(sleepDurationMs > 0); + task.run(); + latch.await(); + // Since we have refresh taking up 2s, then the next refresh should have sleep duration of 3s. Here we check + // the sleep duration to be non-zero since the sleep duration is calculated dynamically. + sleepDurationMs = task.getSleepDuration().millis(); + assertTrue(sleepDurationMs > 0); + assertEquals(0, latch.getCount()); + indexService.close("test", false); + assertBusy(() -> { assertEquals(TimeValue.ZERO, task.getSleepDuration()); }); + } + } + + public void testBaseAsyncTaskWithFixedIntervalEnabledAndLongerRefresh() throws Exception { + IndexService indexService = createIndex("test", Settings.EMPTY); + CountDownLatch latch = new CountDownLatch(1); + try ( + IndexService.BaseAsyncTask task = new IndexService.BaseAsyncTask( + indexService, + TimeValue.timeValueSeconds(1), + () -> Boolean.TRUE + ) { + @Override + protected void runInternal() { + try { + Thread.sleep(2000); + latch.countDown(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + } + } + ) { + // In zero state, we have a random sleep duration + long sleepDurationMs = task.getSleepDuration().millis(); + assertTrue(sleepDurationMs > 0); + task.run(); + latch.await(); + indexService.close("test", false); + // Since we have refresh taking up 2s and refresh interval as 1s, then the next refresh should happen immediately. 
+ sleepDurationMs = task.getSleepDuration().millis(); + assertEquals(0, sleepDurationMs); + assertEquals(0, latch.getCount()); + } + } + @Override protected Settings featureFlagSettings() { return Settings.builder() From cf3193167da0f7f703112f8917a08d8ab4f517d6 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 3 Apr 2025 11:09:48 -0400 Subject: [PATCH 135/550] Enhance Java Agent to intercept Runtime::halt (#17757) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../java/org/opensearch/javaagent/Agent.java | 12 +- .../javaagent/RuntimeHaltInterceptor.java | 49 ++++++++ .../javaagent/SocketChannelInterceptor.java | 2 +- .../StackCallerClassChainExtractor.java | 42 +++++++ ...CallerProtectionDomainChainExtractor.java} | 6 +- .../javaagent/SystemExitInterceptor.java | 11 +- ...tInterceptorTests.java => AgentTests.java} | 9 +- .../javaagent/bootstrap/AgentPolicy.java | 113 ++++++++++++++++-- 9 files changed, 225 insertions(+), 20 deletions(-) create mode 100644 libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/RuntimeHaltInterceptor.java create mode 100644 libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerClassChainExtractor.java rename libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/{StackCallerChainExtractor.java => StackCallerProtectionDomainChainExtractor.java} (73%) rename libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/{SystemExitInterceptorTests.java => AgentTests.java} (68%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6469382b5a18e..60820ab196261 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Security Manager Replacement] Create initial Java Agent to intercept Socket::connect calls ([#17724](https://github.com/opensearch-project/OpenSearch/pull/17724)) - Add ingestion management APIs for pause, resume and get ingestion state ([#17631](https://github.com/opensearch-project/OpenSearch/pull/17631)) - [Security Manager Replacement] Enhance Java Agent to intercept System::exit ([#17746](https://github.com/opensearch-project/OpenSearch/pull/17746)) +- [Security Manager Replacement] Enhance Java Agent to intercept Runtime::halt ([#17757](https://github.com/opensearch-project/OpenSearch/pull/17757)) - Support AutoExpand for SearchReplica ([#17741](https://github.com/opensearch-project/OpenSearch/pull/17741)) - Implement fixed interval refresh task scheduling ([#17777](https://github.com/opensearch-project/OpenSearch/pull/17777)) diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java index 4b65d841f9768..4eb7baa93ab7e 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java @@ -64,8 +64,10 @@ private static AgentBuilder createAgentBuilder(Instrumentation inst) throws Exce ClassInjector.UsingUnsafe.ofBootLoader() .inject( Map.of( - new TypeDescription.ForLoadedType(StackCallerChainExtractor.class), - ClassFileLocator.ForClassLoader.read(StackCallerChainExtractor.class), + new TypeDescription.ForLoadedType(StackCallerProtectionDomainChainExtractor.class), + ClassFileLocator.ForClassLoader.read(StackCallerProtectionDomainChainExtractor.class), + new TypeDescription.ForLoadedType(StackCallerClassChainExtractor.class), + ClassFileLocator.ForClassLoader.read(StackCallerClassChainExtractor.class), new 
TypeDescription.ForLoadedType(AgentPolicy.class), ClassFileLocator.ForClassLoader.read(AgentPolicy.class) ) @@ -83,6 +85,12 @@ private static AgentBuilder createAgentBuilder(Instrumentation inst) throws Exce (b, typeDescription, classLoader, module, pd) -> b.visit( Advice.to(SystemExitInterceptor.class).on(ElementMatchers.named("exit")) ) + ) + .type(ElementMatchers.is(java.lang.Runtime.class)) + .transform( + (b, typeDescription, classLoader, module, pd) -> b.visit( + Advice.to(RuntimeHaltInterceptor.class).on(ElementMatchers.named("halt")) + ) ); return agentBuilder; diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/RuntimeHaltInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/RuntimeHaltInterceptor.java new file mode 100644 index 0000000000000..806d519221424 --- /dev/null +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/RuntimeHaltInterceptor.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.javaagent; + +import org.opensearch.javaagent.bootstrap.AgentPolicy; + +import java.lang.StackWalker.Option; +import java.security.Policy; +import java.util.stream.Stream; + +import net.bytebuddy.asm.Advice; + +/** + * {@link Runtime#halt} interceptor + */ +public class RuntimeHaltInterceptor { + /** + * RuntimeHaltInterceptor + */ + public RuntimeHaltInterceptor() {} + + /** + * Interceptor + * @param code exit code + * @throws Exception exceptions + */ + @Advice.OnMethodEnter + @SuppressWarnings("removal") + public static void intercept(int code) throws Exception { + final Policy policy = AgentPolicy.getPolicy(); + if (policy == null) { + return; /* noop */ + } + + final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); + final Class caller = walker.getCallerClass(); + final Stream> chain = walker.walk(StackCallerClassChainExtractor.INSTANCE); + + if (AgentPolicy.isChainThatCanExit(caller, chain) == false) { + throw new SecurityException("The class " + caller + " is not allowed to call Runtime::halt(" + code + ")"); + } + } +} diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java index 40b8118882b58..36daed518710f 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java @@ -47,7 +47,7 @@ public static void intercept(@Advice.AllArguments Object[] args, @Origin Method } final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); - final Stream callers = walker.walk(StackCallerChainExtractor.INSTANCE); + final Stream callers = walker.walk(StackCallerProtectionDomainChainExtractor.INSTANCE); if (args[0] instanceof InetSocketAddress address) { if (!AgentPolicy.isTrustedHost(address.getHostString())) { diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerClassChainExtractor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerClassChainExtractor.java new file mode 100644 index 0000000000000..824e23a8deb85 --- /dev/null +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerClassChainExtractor.java @@ -0,0 +1,42 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.javaagent; + +import java.lang.StackWalker.StackFrame; +import java.util.function.Function; +import java.util.stream.Stream; + +/** + * Stack Caller Class Chain Extractor + */ +public final class StackCallerClassChainExtractor implements Function, Stream>> { + /** + * Single instance of stateless class. + */ + public static final StackCallerClassChainExtractor INSTANCE = new StackCallerClassChainExtractor(); + + /** + * Constructor + */ + private StackCallerClassChainExtractor() {} + + /** + * Folds the stack + * @param frames stack frames + */ + @Override + public Stream> apply(Stream frames) { + return cast(frames); + } + + @SuppressWarnings("unchecked") + private static Stream cast(Stream frames) { + return (Stream) frames.map(StackFrame::getDeclaringClass).filter(c -> !c.isHidden()).distinct(); + } +} diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerProtectionDomainChainExtractor.java similarity index 73% rename from libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java rename to libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerProtectionDomainChainExtractor.java index 3586f638edfdb..e9684362f193a 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerChainExtractor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerProtectionDomainChainExtractor.java @@ -16,16 +16,16 @@ /** * Stack Caller Chain Extractor */ -public final class StackCallerChainExtractor implements Function, Stream> { +public final class StackCallerProtectionDomainChainExtractor implements Function, Stream> { /** * Single instance of stateless class. 
*/ - public static final StackCallerChainExtractor INSTANCE = new StackCallerChainExtractor(); + public static final StackCallerProtectionDomainChainExtractor INSTANCE = new StackCallerProtectionDomainChainExtractor(); /** * Constructor */ - private StackCallerChainExtractor() {} + private StackCallerProtectionDomainChainExtractor() {} /** * Folds the stack diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java index 20087500f1df4..3e1bb2b9d3bbe 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java @@ -11,6 +11,8 @@ import org.opensearch.javaagent.bootstrap.AgentPolicy; import java.lang.StackWalker.Option; +import java.security.Policy; +import java.util.stream.Stream; import net.bytebuddy.asm.Advice; @@ -29,11 +31,18 @@ public SystemExitInterceptor() {} * @throws Exception exceptions */ @Advice.OnMethodEnter() + @SuppressWarnings("removal") public static void intercept(int code) throws Exception { + final Policy policy = AgentPolicy.getPolicy(); + if (policy == null) { + return; /* noop */ + } + final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); final Class caller = walker.getCallerClass(); + final Stream> chain = walker.walk(StackCallerClassChainExtractor.INSTANCE); - if (!AgentPolicy.isClassThatCanExit(caller.getName())) { + if (AgentPolicy.isChainThatCanExit(caller, chain) == false) { throw new SecurityException("The class " + caller + " is not allowed to call System::exit(" + code + ")"); } } diff --git a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SystemExitInterceptorTests.java b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTests.java similarity index 68% rename from libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SystemExitInterceptorTests.java rename to libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTests.java index de5f84fa68e6b..bde048d7f12c9 100644 --- a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SystemExitInterceptorTests.java +++ b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTests.java @@ -15,16 +15,21 @@ import java.security.Policy; import java.util.Set; -public class SystemExitInterceptorTests { +public class AgentTests { @SuppressWarnings("removal") @BeforeClass public static void setUp() { AgentPolicy.setPolicy(new Policy() { - }, Set.of(), new String[] { "worker.org.gradle.process.internal.worker.GradleWorkerMain" }); + }, Set.of(), (caller, chain) -> caller.getName().equalsIgnoreCase("worker.org.gradle.process.internal.worker.GradleWorkerMain")); } @Test(expected = SecurityException.class) public void testSystemExitIsForbidden() { System.exit(0); } + + @Test(expected = SecurityException.class) + public void testRuntimeHaltIsForbidden() { + Runtime.getRuntime().halt(0); + } } diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java index 7f64646a0ca29..332d2af6bf102 100644 --- a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java +++ b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java @@ -13,12 +13,13 @@ import java.security.Permission; import java.security.Policy; 
import java.security.ProtectionDomain; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Set; +import java.util.function.BiFunction; import java.util.logging.Logger; import java.util.stream.Collectors; +import java.util.stream.Stream; /** * Agent Policy @@ -28,7 +29,92 @@ public class AgentPolicy { private static final Logger LOGGER = Logger.getLogger(AgentPolicy.class.getName()); private static volatile Policy policy; private static volatile Set trustedHosts; - private static volatile Set classesThatCanExit; + private static volatile BiFunction, Stream>, Boolean> classesThatCanExit; + + /** + * None of the classes is allowed to call {@link System#exit} or {@link Runtime#halt} + */ + public static final class NoneCanExit implements BiFunction, Stream>, Boolean> { + /** + * NoneCanExit + */ + public NoneCanExit() {} + + /** + * Check if class is allowed to call {@link System#exit}, {@link Runtime#halt} + * @param caller caller class + * @param chain chain of call classes + * @return is class allowed to call {@link System#exit}, {@link Runtime#halt} or not + */ + @Override + public Boolean apply(Class caller, Stream> chain) { + return true; + } + } + + /** + * Only caller is allowed to call {@link System#exit} or {@link Runtime#halt} + */ + public static final class CallerCanExit implements BiFunction, Stream>, Boolean> { + private final String[] classesThatCanExit; + + /** + * CallerCanExit + * @param classesThatCanExit classes that can exit + */ + public CallerCanExit(final String[] classesThatCanExit) { + this.classesThatCanExit = classesThatCanExit; + } + + /** + * Check if class is allowed to call {@link System#exit}, {@link Runtime#halt} + * @param caller caller class + * @param chain chain of call classes + * @return is class allowed to call {@link System#exit}, {@link Runtime#halt} or not + */ + @Override + public Boolean apply(Class caller, Stream> chain) { + for (final String classThatCanExit : classesThatCanExit) { + if (caller.getName().equalsIgnoreCase(classThatCanExit)) { + return true; + } + } + return false; + } + } + + /** + * Any caller in the chain is allowed to call {@link System#exit} or {@link Runtime#halt} + */ + public static final class AnyCanExit implements BiFunction, Stream>, Boolean> { + private final String[] classesThatCanExit; + + /** + * AnyCanExit + * @param classesThatCanExit classes that can exit + */ + public AnyCanExit(final String[] classesThatCanExit) { + this.classesThatCanExit = classesThatCanExit; + } + + /** + * Check if class is allowed to call {@link System#exit}, {@link Runtime#halt} + * @param caller caller class + * @param chain chain of call classes + * @return is class allowed to call {@link System#exit}, {@link Runtime#halt} or not + */ + @Override + public Boolean apply(Class caller, Stream> chain) { + return chain.anyMatch(clazz -> { + for (final String classThatCanExit : classesThatCanExit) { + if (clazz.getName().matches(classThatCanExit)) { + return true; + } + } + return false; + }); + } + } private AgentPolicy() {} @@ -37,20 +123,24 @@ private AgentPolicy() {} * @param policy policy */ public static void setPolicy(Policy policy) { - setPolicy(policy, Set.of(), new String[0]); + setPolicy(policy, Set.of(), new NoneCanExit()); } /** * Set Agent policy * @param policy policy * @param trustedHosts trusted hosts - * @param classesThatCanExit classed that are allowed to call {@link System#exit} + * @param classesThatCanExit classed that are allowed to call {@link System#exit}, {@link 
Runtime#halt} */ - public static void setPolicy(Policy policy, final Set trustedHosts, final String[] classesThatCanExit) { + public static void setPolicy( + Policy policy, + final Set trustedHosts, + final BiFunction, Stream>, Boolean> classesThatCanExit + ) { if (AgentPolicy.policy == null) { AgentPolicy.policy = policy; AgentPolicy.trustedHosts = Collections.unmodifiableSet(trustedHosts); - AgentPolicy.classesThatCanExit = Arrays.stream(classesThatCanExit).collect(Collectors.toSet()); + AgentPolicy.classesThatCanExit = classesThatCanExit; LOGGER.info("Policy attached successfully: " + policy); } else { throw new SecurityException("The Policy has been set already: " + AgentPolicy.policy); @@ -92,11 +182,12 @@ public static boolean isTrustedHost(String hostname) { } /** - * Check if class is allowed to call {@link System#exit} - * @param name class name - * @return is class allowed to call {@link System#exit} or not + * Check if class is allowed to call {@link System#exit}, {@link Runtime#halt} + * @param caller caller class + * @param chain chain of call classes + * @return is class allowed to call {@link System#exit}, {@link Runtime#halt} or not */ - public static boolean isClassThatCanExit(String name) { - return AgentPolicy.classesThatCanExit.contains(name); + public static boolean isChainThatCanExit(Class caller, Stream> chain) { + return classesThatCanExit.apply(caller, chain); } } From 5487374d9b546f23a69ea6a293610580f6ea59b8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Apr 2025 16:32:29 -0500 Subject: [PATCH 136/550] Bump ch.qos.logback:logback-classic from 1.5.17 to 1.5.18 in /test/fixtures/hdfs-fixture (#17730) * Bump ch.qos.logback:logback-classic in /test/fixtures/hdfs-fixture Bumps [ch.qos.logback:logback-classic](https://github.com/qos-ch/logback) from 1.5.17 to 1.5.18. - [Release notes](https://github.com/qos-ch/logback/releases) - [Commits](https://github.com/qos-ch/logback/compare/v_1.5.17...v_1.5.18) --- updated-dependencies: - dependency-name: ch.qos.logback:logback-classic dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 60820ab196261..d83ee41339b2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.netflix.nebula.ospackage-base` from 11.11.1 to 11.11.2 ([#17734](https://github.com/opensearch-project/OpenSearch/pull/17734)) - Bump `com.nimbusds:oauth2-oidc-sdk` from 11.21 to 11.23.1 ([#17729](https://github.com/opensearch-project/OpenSearch/pull/17729)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.52.0 to 2.54.1 ([#17733](https://github.com/opensearch-project/OpenSearch/pull/17733)) +- Bump `ch.qos.logback:logback-classic` from 1.5.17 to 1.5.18 ([#17730](https://github.com/opensearch-project/OpenSearch/pull/17730)) ### Changed diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index faf20b2070cf6..b3949062598be 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -75,7 +75,7 @@ dependencies { api "org.apache.commons:commons-text:1.13.0" api "commons-net:commons-net:3.11.1" api "ch.qos.logback:logback-core:1.5.17" - api "ch.qos.logback:logback-classic:1.5.17" + api "ch.qos.logback:logback-classic:1.5.18" api "org.jboss.xnio:xnio-nio:3.8.16.Final" api 'org.jline:jline:3.29.0' api 'org.apache.commons:commons-configuration2:2.11.0' From eb905709242eff2f95807f74981590251dad85e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Apr 2025 17:55:48 -0500 Subject: [PATCH 137/550] Bump reactor_netty from 1.1.27 to 1.2.3 (#17377) * Bump reactor_netty from 1.1.27 to 1.2.3 Bumps `reactor_netty` from 1.1.27 to 1.2.3. Updates `io.projectreactor.netty:reactor-netty-core` from 1.1.27 to 1.2.3 - [Release notes](https://github.com/reactor/reactor-netty/releases) - [Commits](https://github.com/reactor/reactor-netty/compare/v1.1.27...v1.2.3) Updates `io.projectreactor.netty:reactor-netty-http` from 1.1.27 to 1.2.3 - [Release notes](https://github.com/reactor/reactor-netty/releases) - [Commits](https://github.com/reactor/reactor-netty/compare/v1.1.27...v1.2.3) --- updated-dependencies: - dependency-name: io.projectreactor.netty:reactor-netty-core dependency-type: direct:production update-type: version-update:semver-minor - dependency-name: io.projectreactor.netty:reactor-netty-http dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] * Ignore missing classes Signed-off-by: Binlong Gao * Ignore missing classes Signed-off-by: Binlong Gao --------- Signed-off-by: dependabot[bot] Signed-off-by: Binlong Gao Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Binlong Gao --- CHANGELOG.md | 1 + gradle/libs.versions.toml | 2 +- plugins/repository-azure/build.gradle | 17 +++++++++++++++++ .../reactor-netty-core-1.1.27.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.2.4.jar.sha1 | 1 + .../reactor-netty-http-1.1.27.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.2.4.jar.sha1 | 1 + plugins/transport-reactor-netty4/build.gradle | 18 +++++++++++++++++- .../reactor-netty-core-1.1.27.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.2.4.jar.sha1 | 1 + .../reactor-netty-http-1.1.27.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.2.4.jar.sha1 | 1 + 12 files changed, 40 insertions(+), 6 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.27.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.2.4.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.27.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.2.4.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.27.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.2.4.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.27.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.2.4.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index d83ee41339b2f..3e6b2181c0738 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.nimbusds:oauth2-oidc-sdk` from 11.21 to 11.23.1 ([#17729](https://github.com/opensearch-project/OpenSearch/pull/17729)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.52.0 to 2.54.1 ([#17733](https://github.com/opensearch-project/OpenSearch/pull/17733)) - Bump `ch.qos.logback:logback-classic` from 1.5.17 to 1.5.18 ([#17730](https://github.com/opensearch-project/OpenSearch/pull/17730)) +- Bump `reactor_netty` from 1.1.26 to 1.2.3 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322), [#17377](https://github.com/opensearch-project/OpenSearch/pull/17377)) ### Changed diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 4d3aebf83eecc..53b616e2329d6 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -38,7 +38,7 @@ joda = "2.12.7" roaringbitmap = "1.3.0" # project reactor -reactor_netty = "1.1.27" +reactor_netty = "1.2.4" reactor = "3.5.20" # client dependencies diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index b8a674c6d8a6c..0bf07695745d3 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -261,6 +261,23 @@ thirdPartyAudit { 'org.opensaml.xmlsec.signature.support.SignatureValidator', 'org.opensaml.xmlsec.signature.support.Signer', // End of the list of classes from the optional compile/provided dependencies used in "com.nimbusds:oauth2-oidc-sdk". 
+ + // from reactor_netty + 'io.netty.incubator.codec.http3.Http3', + 'io.netty.incubator.codec.http3.Http3ClientConnectionHandler', + 'io.netty.incubator.codec.http3.Http3DataFrame', + 'io.netty.incubator.codec.http3.Http3Headers', + 'io.netty.incubator.codec.http3.Http3HeadersFrame', + 'io.netty.incubator.codec.quic.QuicChannel', + 'io.netty.incubator.codec.quic.QuicChannelBootstrap', + 'io.netty.incubator.codec.quic.QuicClientCodecBuilder', + 'io.netty.incubator.codec.quic.QuicServerCodecBuilder', + 'io.netty.incubator.codec.quic.QuicSslContext', + 'io.netty.incubator.codec.quic.QuicSslContextBuilder', + 'io.netty.incubator.codec.quic.QuicSslEngine', + 'io.netty.incubator.codec.quic.QuicStreamChannel', + 'io.netty.incubator.codec.quic.QuicStreamChannelBootstrap', + 'io.netty.incubator.codec.quic.QuicTokenHandler', ) ignoreViolations( diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.27.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.27.jar.sha1 deleted file mode 100644 index 3eac15e74ad19..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.27.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -094428d25b65a0bdb89d639934d39b7ba7f169ee \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.2.4.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.2.4.jar.sha1 new file mode 100644 index 0000000000000..7bf9a31f0c7ca --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.2.4.jar.sha1 @@ -0,0 +1 @@ +5e51a8633d5ece1081216edb5455605d24570d9b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.27.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.27.jar.sha1 deleted file mode 100644 index 7236917a621c7..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.27.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -56dab8976b8d79e37597e736d62bb4111cc28e9c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.2.4.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.2.4.jar.sha1 new file mode 100644 index 0000000000000..89b8fe9f06dbc --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.2.4.jar.sha1 @@ -0,0 +1 @@ +f155caf727991cddcd2ff134783ff5451e5f7dea \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle index ba8b17c5877a7..1fe8862e2eab7 100644 --- a/plugins/transport-reactor-netty4/build.gradle +++ b/plugins/transport-reactor-netty4/build.gradle @@ -226,7 +226,23 @@ thirdPartyAudit { 'io.micrometer.tracing.handler.DefaultTracingObservationHandler', 'io.micrometer.tracing.handler.PropagatingReceiverTracingObservationHandler', 'io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler', - 'io.micrometer.tracing.propagation.Propagator' + 'io.micrometer.tracing.propagation.Propagator', + // from reactor_netty + 'io.netty.incubator.codec.http3.Http3', + 'io.netty.incubator.codec.http3.Http3ClientConnectionHandler', + 'io.netty.incubator.codec.http3.Http3DataFrame', + 'io.netty.incubator.codec.http3.Http3Headers', + 'io.netty.incubator.codec.http3.Http3HeadersFrame', + 'io.netty.incubator.codec.quic.QuicChannel', + 'io.netty.incubator.codec.quic.QuicChannelBootstrap', + 'io.netty.incubator.codec.quic.QuicClientCodecBuilder', + 'io.netty.incubator.codec.quic.QuicServerCodecBuilder', + 'io.netty.incubator.codec.quic.QuicSslContext', + 
'io.netty.incubator.codec.quic.QuicSslContextBuilder', + 'io.netty.incubator.codec.quic.QuicSslEngine', + 'io.netty.incubator.codec.quic.QuicStreamChannel', + 'io.netty.incubator.codec.quic.QuicStreamChannelBootstrap', + 'io.netty.incubator.codec.quic.QuicTokenHandler', ) ignoreViolations( diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.27.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.27.jar.sha1 deleted file mode 100644 index 3eac15e74ad19..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.27.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -094428d25b65a0bdb89d639934d39b7ba7f169ee \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.2.4.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.2.4.jar.sha1 new file mode 100644 index 0000000000000..7bf9a31f0c7ca --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.2.4.jar.sha1 @@ -0,0 +1 @@ +5e51a8633d5ece1081216edb5455605d24570d9b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.27.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.27.jar.sha1 deleted file mode 100644 index 7236917a621c7..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.27.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -56dab8976b8d79e37597e736d62bb4111cc28e9c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.2.4.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.2.4.jar.sha1 new file mode 100644 index 0000000000000..89b8fe9f06dbc --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.2.4.jar.sha1 @@ -0,0 +1 @@ +f155caf727991cddcd2ff134783ff5451e5f7dea \ No newline at end of file From 5ec96c5852d0a65b374e0c44dff404b030d56821 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 3 Apr 2025 16:38:26 -0700 Subject: [PATCH 138/550] Fix socket interceptor and add unit test (#17784) The optimization of using a Stream instead of a Collection caused problems with class not found and/or illegal access errors when using the lambda function in the `Stream::forEach` call in the intercept method. 
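A minimal sketch of the failure pattern this commit removes, assuming a ByteBuddy @Advice body like the one in SocketChannelInterceptor (the class below and its method names are invented for illustration and are not part of the patch):

    import java.net.SocketPermission;
    import java.security.Policy;
    import java.security.ProtectionDomain;
    import java.util.Collection;
    import java.util.stream.Stream;

    @SuppressWarnings("removal")
    final class AdviceShapeSketch {

        // Old shape: the lambda handed to Stream::forEach compiles into a synthetic member of this
        // class. Because advice bodies are inlined into the instrumented JDK class, that member may be
        // unresolvable there, surfacing as the "class not found and/or illegal access" errors described above.
        static void streamShape(Policy policy, Stream<ProtectionDomain> callers, SocketPermission permission) {
            callers.forEach(domain -> {
                if (!policy.implies(domain, permission)) {
                    throw new SecurityException("Denied access, domain " + domain);
                }
            });
        }

        // New shape: materialize the callers into a Collection and iterate with a plain for loop, so the
        // inlined advice body needs no synthetic lambda support.
        static void collectionShape(Policy policy, Collection<ProtectionDomain> callers, SocketPermission permission) {
            for (ProtectionDomain domain : callers) {
                if (!policy.implies(domain, permission)) {
                    throw new SecurityException("Denied access, domain " + domain);
                }
            }
        }
    }

The diff below applies exactly this change: StackCallerProtectionDomainChainExtractor now collects the protection domains into a Set instead of returning a Stream, and the interceptor loops over the returned Collection.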
Signed-off-by: Andrew Ross --- .../javaagent/SocketChannelInterceptor.java | 12 ++++---- ...kCallerProtectionDomainChainExtractor.java | 8 +++-- .../opensearch/javaagent/AgentTestCase.java | 24 +++++++++++++++ .../org/opensearch/javaagent/AgentTests.java | 14 +-------- .../SocketChannelInterceptorTests.java | 29 +++++++++++++++++++ 5 files changed, 65 insertions(+), 22 deletions(-) create mode 100644 libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTestCase.java create mode 100644 libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SocketChannelInterceptorTests.java diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java index 36daed518710f..3ac48f9e72f74 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SocketChannelInterceptor.java @@ -18,7 +18,7 @@ import java.net.UnixDomainSocketAddress; import java.security.Policy; import java.security.ProtectionDomain; -import java.util.stream.Stream; +import java.util.Collection; import net.bytebuddy.asm.Advice; import net.bytebuddy.asm.Advice.Origin; @@ -47,26 +47,26 @@ public static void intercept(@Advice.AllArguments Object[] args, @Origin Method } final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); - final Stream callers = walker.walk(StackCallerProtectionDomainChainExtractor.INSTANCE); + final Collection callers = walker.walk(StackCallerProtectionDomainChainExtractor.INSTANCE); if (args[0] instanceof InetSocketAddress address) { if (!AgentPolicy.isTrustedHost(address.getHostString())) { final String host = address.getHostString() + ":" + address.getPort(); final SocketPermission permission = new SocketPermission(host, "connect,resolve"); - callers.forEach(domain -> { + for (ProtectionDomain domain : callers) { if (!policy.implies(domain, permission)) { throw new SecurityException("Denied access to: " + host + ", domain " + domain); } - }); + } } } else if (args[0] instanceof UnixDomainSocketAddress address) { final NetPermission permission = new NetPermission("accessUnixDomainSocket"); - callers.forEach(domain -> { + for (ProtectionDomain domain : callers) { if (!policy.implies(domain, permission)) { throw new SecurityException("Denied access to: " + address + ", domain " + domain); } - }); + } } } } diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerProtectionDomainChainExtractor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerProtectionDomainChainExtractor.java index e9684362f193a..69b91d0d8b74c 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerProtectionDomainChainExtractor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerProtectionDomainChainExtractor.java @@ -10,13 +10,15 @@ import java.lang.StackWalker.StackFrame; import java.security.ProtectionDomain; +import java.util.Collection; import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; /** * Stack Caller Chain Extractor */ -public final class StackCallerProtectionDomainChainExtractor implements Function, Stream> { +public final class StackCallerProtectionDomainChainExtractor implements Function, Collection> { /** * Single instance of stateless class. 
*/ @@ -32,10 +34,10 @@ private StackCallerProtectionDomainChainExtractor() {} * @param frames stack frames */ @Override - public Stream apply(Stream frames) { + public Collection apply(Stream frames) { return frames.map(StackFrame::getDeclaringClass) .map(Class::getProtectionDomain) .filter(pd -> pd.getCodeSource() != null) /* JDK */ - .distinct(); + .collect(Collectors.toSet()); } } diff --git a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTestCase.java b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTestCase.java new file mode 100644 index 0000000000000..1cfc97b6352fd --- /dev/null +++ b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTestCase.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.javaagent; + +import org.opensearch.javaagent.bootstrap.AgentPolicy; +import org.junit.BeforeClass; + +import java.security.Policy; +import java.util.Set; + +public abstract class AgentTestCase { + @SuppressWarnings("removal") + @BeforeClass + public static void setUp() { + AgentPolicy.setPolicy(new Policy() { + }, Set.of(), (caller, chain) -> caller.getName().equalsIgnoreCase("worker.org.gradle.process.internal.worker.GradleWorkerMain")); + } +} diff --git a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTests.java b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTests.java index bde048d7f12c9..4a7a2976e08a5 100644 --- a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTests.java +++ b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTests.java @@ -8,21 +8,9 @@ package org.opensearch.javaagent; -import org.opensearch.javaagent.bootstrap.AgentPolicy; -import org.junit.BeforeClass; import org.junit.Test; -import java.security.Policy; -import java.util.Set; - -public class AgentTests { - @SuppressWarnings("removal") - @BeforeClass - public static void setUp() { - AgentPolicy.setPolicy(new Policy() { - }, Set.of(), (caller, chain) -> caller.getName().equalsIgnoreCase("worker.org.gradle.process.internal.worker.GradleWorkerMain")); - } - +public class AgentTests extends AgentTestCase { @Test(expected = SecurityException.class) public void testSystemExitIsForbidden() { System.exit(0); diff --git a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SocketChannelInterceptorTests.java b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SocketChannelInterceptorTests.java new file mode 100644 index 0000000000000..38b21f79cf0f6 --- /dev/null +++ b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SocketChannelInterceptorTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.javaagent; + +import org.junit.Test; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.UnixDomainSocketAddress; +import java.nio.channels.SocketChannel; + +import static org.junit.Assert.assertThrows; + +public class SocketChannelInterceptorTests extends AgentTestCase { + @Test + public void test() throws IOException { + try (SocketChannel channel = SocketChannel.open()) { + assertThrows(SecurityException.class, () -> channel.connect(new InetSocketAddress("localhost", 9200))); + + assertThrows(SecurityException.class, () -> channel.connect(UnixDomainSocketAddress.of("fake-path"))); + } + } +} From 32e3effee75076b21c0fd846c3a7dd5a633a8b71 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 4 Apr 2025 07:11:05 -0400 Subject: [PATCH 139/550] Fix StackCallerClassChainExtractor and respectively RuntimeHaltInterceptor, SystemExitInterceptor (#17793) Signed-off-by: Andriy Redko --- .../javaagent/RuntimeHaltInterceptor.java | 4 ++-- .../StackCallerClassChainExtractor.java | 11 ++++++---- .../javaagent/SystemExitInterceptor.java | 4 ++-- .../javaagent/bootstrap/AgentPolicy.java | 21 ++++++++++--------- 4 files changed, 22 insertions(+), 18 deletions(-) diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/RuntimeHaltInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/RuntimeHaltInterceptor.java index 806d519221424..9f879a744f45f 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/RuntimeHaltInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/RuntimeHaltInterceptor.java @@ -12,7 +12,7 @@ import java.lang.StackWalker.Option; import java.security.Policy; -import java.util.stream.Stream; +import java.util.Collection; import net.bytebuddy.asm.Advice; @@ -40,7 +40,7 @@ public static void intercept(int code) throws Exception { final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); final Class caller = walker.getCallerClass(); - final Stream> chain = walker.walk(StackCallerClassChainExtractor.INSTANCE); + final Collection> chain = walker.walk(StackCallerClassChainExtractor.INSTANCE); if (AgentPolicy.isChainThatCanExit(caller, chain) == false) { throw new SecurityException("The class " + caller + " is not allowed to call Runtime::halt(" + code + ")"); diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerClassChainExtractor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerClassChainExtractor.java index 824e23a8deb85..b7be2883b6a79 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerClassChainExtractor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/StackCallerClassChainExtractor.java @@ -9,13 +9,16 @@ package org.opensearch.javaagent; import java.lang.StackWalker.StackFrame; +import java.util.Collection; +import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; /** * Stack Caller Class Chain Extractor */ -public final class StackCallerClassChainExtractor implements Function, Stream>> { +public final class StackCallerClassChainExtractor implements Function, Collection>> { /** * Single instance of stateless class. 
*/ @@ -31,12 +34,12 @@ private StackCallerClassChainExtractor() {} * @param frames stack frames */ @Override - public Stream> apply(Stream frames) { + public Collection> apply(Stream frames) { return cast(frames); } @SuppressWarnings("unchecked") - private static Stream cast(Stream frames) { - return (Stream) frames.map(StackFrame::getDeclaringClass).filter(c -> !c.isHidden()).distinct(); + private static Set cast(Stream frames) { + return (Set) frames.map(StackFrame::getDeclaringClass).filter(c -> !c.isHidden()).collect(Collectors.toSet()); } } diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java index 3e1bb2b9d3bbe..6ba4f59e00942 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/SystemExitInterceptor.java @@ -12,7 +12,7 @@ import java.lang.StackWalker.Option; import java.security.Policy; -import java.util.stream.Stream; +import java.util.Collection; import net.bytebuddy.asm.Advice; @@ -40,7 +40,7 @@ public static void intercept(int code) throws Exception { final StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE); final Class caller = walker.getCallerClass(); - final Stream> chain = walker.walk(StackCallerClassChainExtractor.INSTANCE); + final Collection> chain = walker.walk(StackCallerClassChainExtractor.INSTANCE); if (AgentPolicy.isChainThatCanExit(caller, chain) == false) { throw new SecurityException("The class " + caller + " is not allowed to call System::exit(" + code + ")"); diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java index 332d2af6bf102..c54a7296cbebe 100644 --- a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java +++ b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java @@ -13,6 +13,7 @@ import java.security.Permission; import java.security.Policy; import java.security.ProtectionDomain; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; @@ -29,12 +30,12 @@ public class AgentPolicy { private static final Logger LOGGER = Logger.getLogger(AgentPolicy.class.getName()); private static volatile Policy policy; private static volatile Set trustedHosts; - private static volatile BiFunction, Stream>, Boolean> classesThatCanExit; + private static volatile BiFunction, Collection>, Boolean> classesThatCanExit; /** * None of the classes is allowed to call {@link System#exit} or {@link Runtime#halt} */ - public static final class NoneCanExit implements BiFunction, Stream>, Boolean> { + public static final class NoneCanExit implements BiFunction, Collection>, Boolean> { /** * NoneCanExit */ @@ -47,7 +48,7 @@ public NoneCanExit() {} * @return is class allowed to call {@link System#exit}, {@link Runtime#halt} or not */ @Override - public Boolean apply(Class caller, Stream> chain) { + public Boolean apply(Class caller, Collection> chain) { return true; } } @@ -86,7 +87,7 @@ public Boolean apply(Class caller, Stream> chain) { /** * Any caller in the chain is allowed to call {@link System#exit} or {@link Runtime#halt} */ - public static final class AnyCanExit implements BiFunction, Stream>, Boolean> { + public static final class AnyCanExit implements 
BiFunction, Collection>, Boolean> { private final String[] classesThatCanExit; /** @@ -104,15 +105,15 @@ public AnyCanExit(final String[] classesThatCanExit) { * @return is class allowed to call {@link System#exit}, {@link Runtime#halt} or not */ @Override - public Boolean apply(Class caller, Stream> chain) { - return chain.anyMatch(clazz -> { + public Boolean apply(Class caller, Collection> chain) { + for (final Class clazz : chain) { for (final String classThatCanExit : classesThatCanExit) { if (clazz.getName().matches(classThatCanExit)) { return true; } } - return false; - }); + } + return false; } } @@ -135,7 +136,7 @@ public static void setPolicy(Policy policy) { public static void setPolicy( Policy policy, final Set trustedHosts, - final BiFunction, Stream>, Boolean> classesThatCanExit + final BiFunction, Collection>, Boolean> classesThatCanExit ) { if (AgentPolicy.policy == null) { AgentPolicy.policy = policy; @@ -187,7 +188,7 @@ public static boolean isTrustedHost(String hostname) { * @param chain chain of call classes * @return is class allowed to call {@link System#exit}, {@link Runtime#halt} or not */ - public static boolean isChainThatCanExit(Class caller, Stream> chain) { + public static boolean isChainThatCanExit(Class caller, Collection> chain) { return classesThatCanExit.apply(caller, chain); } } From 94da0df088033f544d94b202905ab4ff10ce8a8b Mon Sep 17 00:00:00 2001 From: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Date: Fri, 4 Apr 2025 17:11:36 +0530 Subject: [PATCH 140/550] Add some additional tests for SocketChannelInterceptor (#17798) * Add some additional tests for SocketChannelInterceptor Signed-off-by: Rajat Gupta * Fix format violations Signed-off-by: Rajat Gupta --------- Signed-off-by: Rajat Gupta Co-authored-by: Rajat Gupta --- .../javaagent/SocketChannelInterceptorTests.java | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SocketChannelInterceptorTests.java b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SocketChannelInterceptorTests.java index 38b21f79cf0f6..3a4a7b5576ebb 100644 --- a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SocketChannelInterceptorTests.java +++ b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/SocketChannelInterceptorTests.java @@ -11,6 +11,7 @@ import org.junit.Test; import java.io.IOException; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnixDomainSocketAddress; import java.nio.channels.SocketChannel; @@ -19,11 +20,23 @@ public class SocketChannelInterceptorTests extends AgentTestCase { @Test - public void test() throws IOException { + public void testConnections() throws IOException { try (SocketChannel channel = SocketChannel.open()) { assertThrows(SecurityException.class, () -> channel.connect(new InetSocketAddress("localhost", 9200))); assertThrows(SecurityException.class, () -> channel.connect(UnixDomainSocketAddress.of("fake-path"))); + + assertThrows(SecurityException.class, () -> channel.connect(new InetSocketAddress("opensearch.org", 80))); + } + } + + @Test + public void testHostnameResolution() throws IOException { + try (SocketChannel channel = SocketChannel.open()) { + InetAddress[] addresses = InetAddress.getAllByName("localhost"); + for (InetAddress address : addresses) { + assertThrows(SecurityException.class, () -> channel.connect(new InetSocketAddress(address, 9200))); + } } } } From 2665984f7819f6c8cb052ba323f08a264ce30bcf Mon Sep 17 
00:00:00 2001 From: Owais Kazi Date: Fri, 4 Apr 2025 11:25:41 -0700 Subject: [PATCH 141/550] Introduce 512 byte limit to search and ingest pipeline IDs (#17786) * Improve validation for ingest and search pipeline id length Signed-off-by: Owais * Used UTF-8 bytes for length Signed-off-by: Owais --------- Signed-off-by: Owais --- CHANGELOG.md | 1 + .../org/opensearch/ingest/IngestService.java | 17 ++++++++++ .../pipeline/SearchPipelineService.java | 18 ++++++++++ .../opensearch/ingest/IngestServiceTests.java | 34 +++++++++++++++++++ .../pipeline/SearchPipelineServiceTests.java | 21 +++++++++++- 5 files changed, 90 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e6b2181c0738..5f58cde750761 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) - Increase the floor segment size to 16MB ([#17699](https://github.com/opensearch-project/OpenSearch/pull/17699)) +- Introduce 512 byte limit to search and ingest pipeline IDs ([#17786](https://github.com/opensearch-project/OpenSearch/pull/17786)) ### Dependencies - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607), [#17669](https://github.com/opensearch-project/OpenSearch/pull/17669)) diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index 3a8b9da64203b..48697eafd3bbb 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.UnicodeUtil; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchParseException; import org.opensearch.ResourceNotFoundException; @@ -86,6 +87,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -107,6 +109,7 @@ public class IngestService implements ClusterStateApplier, ReportingService ingestInfos, PutPipelineReq throw new IllegalStateException("Ingest info is empty"); } + int pipelineIdLength = UnicodeUtil.calcUTF16toUTF8Length(request.getId(), 0, request.getId().length()); + + if (pipelineIdLength > MAX_PIPELINE_ID_BYTES) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Pipeline id [%s] exceeds maximum length of %d UTF-8 bytes (actual: %d bytes)", + request.getId(), + MAX_PIPELINE_ID_BYTES, + pipelineIdLength + ) + ); + } + Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getMediaType()).v2(); Pipeline pipeline = Pipeline.create(request.getId(), pipelineConfig, processorFactories, scriptService); diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java index abd306269471d..5e4bdc00a3b0e 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import 
diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java
index abd306269471d..5e4bdc00a3b0e 100644
--- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java
+++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java
@@ -10,6 +10,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.apache.lucene.util.UnicodeUtil;
 import org.opensearch.ExceptionsHelper;
 import org.opensearch.OpenSearchParseException;
 import org.opensearch.ResourceNotFoundException;
@@ -54,6 +55,7 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
@@ -73,6 +75,7 @@ public class SearchPipelineService implements ClusterStateApplier, ReportingServ
     public static final String SEARCH_PIPELINE_ORIGIN = "search_pipeline";
     public static final String AD_HOC_PIPELINE_ID = "_ad_hoc_pipeline";
     public static final String NOOP_PIPELINE_ID = "_none";
+    private static final int MAX_PIPELINE_ID_BYTES = 512;
     private static final Logger logger = LogManager.getLogger(SearchPipelineService.class);
     private final ClusterService clusterService;
     private final ScriptService scriptService;
@@ -278,6 +281,21 @@ void validatePipeline(Map searchPipelineInfos
         if (searchPipelineInfos.isEmpty()) {
             throw new IllegalStateException("Search pipeline info is empty");
         }
+
+        int pipelineIdLength = UnicodeUtil.calcUTF16toUTF8Length(request.getId(), 0, request.getId().length());
+
+        if (pipelineIdLength > MAX_PIPELINE_ID_BYTES) {
+            throw new IllegalArgumentException(
+                String.format(
+                    Locale.ROOT,
+                    "Search Pipeline id [%s] exceeds maximum length of %d UTF-8 bytes (actual: %d bytes)",
+                    request.getId(),
+                    MAX_PIPELINE_ID_BYTES,
+                    pipelineIdLength
+                )
+            );
+        }
+
         Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getMediaType()).v2();
         Pipeline pipeline = PipelineWithMetrics.create(
             request.getId(),

diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java
index 73ee77a06b806..6fd21ddd24121 100644
--- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java
+++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java
@@ -89,6 +89,7 @@
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
@@ -395,6 +396,39 @@ public void testValidateNoIngestInfo() throws Exception {
         ingestService.validatePipeline(Collections.singletonMap(discoveryNode, ingestInfo), putRequest);
     }
 
+    public void testValidatePipelineId_WithNotValidLength_ShouldThrowException() throws Exception {
+        IngestService ingestService = createWithProcessors();
+
+        String longId = "a".repeat(512) + "a";
+        PutPipelineRequest putRequest = new PutPipelineRequest(
+            longId,
+            new BytesArray(
+                "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\", \"tag\": \"tag1\"}},"
+                    + "{\"remove\" : {\"field\": \"_field\", \"tag\": \"tag2\"}}]}"
+            ),
+            MediaTypeRegistry.JSON
+        );
+        DiscoveryNode discoveryNode = new DiscoveryNode(
+            "_node_id",
+            buildNewFakeTransportAddress(),
+            emptyMap(),
+            emptySet(),
+            Version.CURRENT
+        );
+        IngestInfo ingestInfo = new IngestInfo(Collections.singletonList(new ProcessorInfo("set")));
+
+        Exception e = expectThrows(
+            IllegalArgumentException.class,
+            () -> ingestService.validatePipeline(Collections.singletonMap(discoveryNode, ingestInfo), putRequest)
+        );
+        String errorMessage = String.format(
+            Locale.ROOT,
+            "Pipeline id [%s] exceeds maximum length of 512 UTF-8 bytes (actual: 513 bytes)",
+            longId
+        );
+        assertEquals(errorMessage, e.getMessage());
+    }
+
     public void testGetProcessorsInPipeline() throws Exception {
         IngestService ingestService = createWithProcessors();
         String id = "_id";
diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java
index cb9ea7839b53c..b378f7b0e0a10 100644
--- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java
+++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java
@@ -70,6 +70,7 @@
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicReference;
@@ -878,7 +879,6 @@ public void testValidatePipeline() throws Exception {
         ProcessorInfo reqProcessor = new ProcessorInfo("scale_request_size");
         ProcessorInfo rspProcessor = new ProcessorInfo("fixed_score");
-        ProcessorInfo injProcessor = new ProcessorInfo("max_score");
         DiscoveryNode n1 = new DiscoveryNode("n1", buildNewFakeTransportAddress(), Version.CURRENT);
         DiscoveryNode n2 = new DiscoveryNode("n2", buildNewFakeTransportAddress(), Version.CURRENT);
         PutSearchPipelineRequest putRequest = new PutSearchPipelineRequest(
@@ -893,6 +893,13 @@ public void testValidatePipeline() throws Exception {
             MediaTypeRegistry.JSON
         );
 
+        String longId = "a".repeat(512) + "a";
+        PutSearchPipelineRequest maxLengthIdPutRequest = new PutSearchPipelineRequest(
+            longId,
+            new BytesArray("{\"request_processors\" : [ { \"scale_request_size\": { \"scale\" : \"foo\" } } ] }"),
+            MediaTypeRegistry.JSON
+        );
+
         SearchPipelineInfo completePipelineInfo = new SearchPipelineInfo(
             Map.of(Pipeline.REQUEST_PROCESSORS_KEY, List.of(reqProcessor), Pipeline.RESPONSE_PROCESSORS_KEY, List.of(rspProcessor))
         );
@@ -906,6 +913,18 @@ public void testValidatePipeline() throws Exception {
         // Discovery failed, no infos passed.
         expectThrows(IllegalStateException.class, () -> searchPipelineService.validatePipeline(Collections.emptyMap(), putRequest));
+        // Max length of pipeline length
+        Exception e = expectThrows(
+            IllegalArgumentException.class,
+            () -> searchPipelineService.validatePipeline(Map.of(n1, completePipelineInfo), maxLengthIdPutRequest)
+        );
+        String errorMessage = String.format(
+            Locale.ROOT,
+            "Search Pipeline id [%s] exceeds maximum length of 512 UTF-8 bytes (actual: 513 bytes)",
+            longId
+        );
+        assertEquals(errorMessage, e.getMessage());
+
         // Invalid configuration in request
         PutSearchPipelineRequest badPutRequest = new PutSearchPipelineRequest(
             "p1",

From 2d42bd0a9f881a26f67179125394249387c28237 Mon Sep 17 00:00:00 2001
From: Michael Froh
Date: Fri, 4 Apr 2025 12:57:57 -0700
Subject: [PATCH 142/550] Check for gradle.xml before trying to modify it (#17799)

Signed-off-by: Michael Froh
---
 gradle/ide.gradle | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/gradle/ide.gradle b/gradle/ide.gradle
index c16205468d63d..aa86e47dcabde 100644
--- a/gradle/ide.gradle
+++ b/gradle/ide.gradle
@@ -114,10 +114,12 @@ if (System.getProperty('idea.active') == 'true') {
    */
   void modifyXml(Object path, Action action) {
     File xmlFile = project.file(path)
-    Node xml = new XmlParser().parse(xmlFile)
-    action.execute(xml)
+    if (xmlFile.exists()) {
+      Node xml = new XmlParser().parse(xmlFile)
+      action.execute(xml)
 
-    xmlFile.withPrintWriter { writer ->
-      new XmlNodePrinter(writer).print(xml)
+      xmlFile.withPrintWriter { writer ->
+        new XmlNodePrinter(writer).print(xml)
+      }
     }
   }
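The Groovy change above guards the IDE-config rewrite behind an existence check, so a checkout that has no gradle.xml yet is skipped instead of failing during parsing. For illustration only, here is a hedged Java sketch of the same parse-only-if-present pattern using standard JAXP APIs; the path and the attribute edit are hypothetical stand-ins, not what the build script actually does.

import java.io.File;
import java.util.function.Consumer;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.w3c.dom.Document;

public class ModifyXmlIfPresent {
    // Same guard as the gradle/ide.gradle change above: parse and rewrite the XML
    // only when the file exists, so a fresh checkout without the file is a no-op.
    static void modifyXml(File xmlFile, Consumer<Document> action) throws Exception {
        if (!xmlFile.exists()) {
            return;
        }
        Document xml = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(xmlFile);
        action.accept(xml);
        TransformerFactory.newInstance().newTransformer().transform(new DOMSource(xml), new StreamResult(xmlFile));
    }

    public static void main(String[] args) throws Exception {
        // Hypothetical path; the real build script resolves it via project.file(...).
        modifyXml(new File(".idea/gradle.xml"), doc -> doc.getDocumentElement().setAttribute("edited", "true"));
    }
}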
From 98568e893783f3eb852eb9492cbb13b8da87bdda Mon Sep 17 00:00:00 2001
From: Karen X
Date: Fri, 4 Apr 2025 18:12:03 -0400
Subject: [PATCH 143/550] [GRPC] Add DocumentService and Bulk GRPC endpoint v1 (#17727)

Signed-off-by: Karen Xu
Signed-off-by: Karen X
---
 CHANGELOG.md | 1 + .../org/opensearch/OpenSearchException.java | 12 +- .../core/common/ParsingException.java | 2 +- plugins/transport-grpc/build.gradle | 5 +- .../licenses/protobufs-0.1.0.jar.sha1 | 1 + .../licenses/protobufs-LICENSE.txt | 475 ++++++++++++++++ .../licenses/protobufs-NOTICE.txt | 475 ++++++++++++++++ .../transport/grpc/GrpcTransportIT.java | 59 ++ .../plugin/transport/grpc/GrpcPlugin.java | 164 ++++++ .../grpc/Netty4GrpcServerTransport.java | 14 +- .../listeners/BulkRequestActionListener.java | 67 +++ .../grpc/listeners/package-info.java | 13 + .../transport/grpc/package-info.java | 2 +- .../transport/grpc/proto/package-info.java | 13 + .../common/FetchSourceContextProtoUtils.java | 109 ++++ .../request/common/ObjectMapProtoUtils.java | 85 +++ .../request/common/ScriptProtoUtils.java | 119 ++++ .../proto/request/common/package-info.java | 13 + .../bulk/ActiveShardCountProtoUtils.java | 65 +++ .../bulk/BulkRequestParserProtoUtils.java | 514 ++++++++++++++++++ .../document/bulk/BulkRequestProtoUtils.java | 105 ++++ .../request/document/bulk/package-info.java | 13 + .../response/common/ObjectMapProtoUtils.java | 83 +++ .../common/OpenSearchExceptionProtoUtils.java | 264 +++++++++ .../response/common/StructProtoUtils.java | 72 +++ .../proto/response/common/package-info.java | 13 + .../bulk/BulkItemResponseProtoUtils.java | 106 ++++ .../document/bulk/BulkResponseProtoUtils.java | 65 +++ .../response/document/bulk/package-info.java | 13 + .../common/DocWriteResponseProtoUtils.java | 73 +++ .../common/DocumentFieldProtoUtils.java | 51 ++ .../document/common/ShardInfoProtoUtils.java | 68 +++ .../document/common/package-info.java | 13 + 
.../document/get/GetResultProtoUtils.java | 106 ++++ .../response/document/get/package-info.java | 13 + .../CircuitBreakingExceptionProtoUtils.java | 45 ++ .../FailedNodeExceptionProtoUtils.java | 43 ++ .../ParsingExceptionProtoUtils.java | 48 ++ ...ponseLimitBreachedExceptionProtoUtils.java | 45 ++ .../exceptions/ScriptExceptionProtoUtils.java | 75 +++ .../SearchParseExceptionProtoUtils.java | 44 ++ ...archPhaseExecutionExceptionProtoUtils.java | 54 ++ ...ardOperationFailedExceptionProtoUtils.java | 38 ++ .../TooManyBucketsExceptionProtoUtils.java | 43 ++ .../response/exceptions/package-info.java | 17 + .../grpc/services/DocumentServiceImpl.java | 53 ++ .../transport/grpc/services/package-info.java | 13 + .../opensearch/transport/grpc/GrpcPlugin.java | 69 --- .../transport/grpc/GrpcPluginTests.java | 119 ++++ .../grpc/Netty4GrpcServerTransportTests.java | 172 ++++++ .../BulkRequestActionListenerTests.java | 84 +++ .../FetchSourceContextProtoUtilsTests.java | 180 ++++++ .../common/ObjectMapProtoUtilsTests.java | 187 +++++++ .../request/common/ScriptProtoUtilsTests.java | 273 ++++++++++ .../bulk/ActiveShardCountProtoUtilsTests.java | 122 +++++ .../BulkRequestParserProtoUtilsTests.java | 340 ++++++++++++ .../bulk/BulkRequestProtoUtilsTests.java | 117 ++++ .../response/BulkResponseProtoUtilsTests.java | 101 ++++ .../common/ObjectMapProtoUtilsTests.java | 267 +++++++++ .../OpenSearchExceptionProtoUtilsTests.java | 436 +++++++++++++++ .../common/StructProtoUtilsTests.java | 177 ++++++ .../bulk/BulkItemResponseProtoUtilsTests.java | 220 ++++++++ .../DocWriteResponseProtoUtilsTests.java | 114 ++++ .../common/DocumentFieldProtoUtilsTests.java | 52 ++ .../common/ShardInfoProtoUtilsTests.java | 92 ++++ .../get/GetResultProtoUtilsTests.java | 172 ++++++ ...rcuitBreakingExceptionProtoUtilsTests.java | 44 ++ .../FailedNodeExceptionProtoUtilsTests.java | 34 ++ .../ParsingExceptionProtoUtilsTests.java | 53 ++ ...LimitBreachedExceptionProtoUtilsTests.java | 44 ++ .../ScriptExceptionProtoUtilsTests.java | 103 ++++ .../SearchParseExceptionProtoUtilsTests.java | 67 +++ ...haseExecutionExceptionProtoUtilsTests.java | 57 ++ ...erationFailedExceptionProtoUtilsTests.java | 67 +++ ...ooManyBucketsExceptionProtoUtilsTests.java | 37 ++ .../services/BulkRequestProtoUtilsTests.java | 157 ++++++ .../document/DocumentServiceImplTests.java | 81 +++ .../grpc/Netty4GrpcServerTransportTests.java | 49 -- 78 files changed, 7746 insertions(+), 125 deletions(-) create mode 100644 plugins/transport-grpc/licenses/protobufs-0.1.0.jar.sha1 create mode 100644 plugins/transport-grpc/licenses/protobufs-LICENSE.txt create mode 100644 plugins/transport-grpc/licenses/protobufs-NOTICE.txt create mode 100644 plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/GrpcTransportIT.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java rename plugins/transport-grpc/src/main/java/org/opensearch/{ => plugin}/transport/grpc/Netty4GrpcServerTransport.java (96%) create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListener.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/package-info.java rename plugins/transport-grpc/src/main/java/org/opensearch/{ => plugin}/transport/grpc/package-info.java (87%) create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/package-info.java create mode 100644 
plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/ObjectMapProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/StructProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkResponseProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocWriteResponseProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/CircuitBreakingExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/FailedNodeExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ParsingExceptionProtoUtils.java create mode 100644 
plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ResponseLimitBreachedExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ScriptExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchParseExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchPhaseExecutionExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ShardOperationFailedExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/TooManyBucketsExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/DocumentServiceImpl.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/package-info.java delete mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/GrpcPluginTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListenerTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/BulkResponseProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/ObjectMapProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/StructProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtilsTests.java create mode 100644 
plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocWriteResponseProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/CircuitBreakingExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/FailedNodeExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ParsingExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ResponseLimitBreachedExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ScriptExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchParseExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchPhaseExecutionExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ShardOperationFailedExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/TooManyBucketsExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/BulkRequestProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/document/DocumentServiceImplTests.java delete mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f58cde750761..976017cae8da0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Security Manager Replacement] Enhance Java Agent to intercept Runtime::halt ([#17757](https://github.com/opensearch-project/OpenSearch/pull/17757)) - Support AutoExpand for SearchReplica ([#17741](https://github.com/opensearch-project/OpenSearch/pull/17741)) - Implement fixed interval refresh task scheduling ([#17777](https://github.com/opensearch-project/OpenSearch/pull/17777)) +- Add GRPC DocumentService and Bulk endpoint ([#17727](https://github.com/opensearch-project/OpenSearch/pull/17727)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/libs/core/src/main/java/org/opensearch/OpenSearchException.java b/libs/core/src/main/java/org/opensearch/OpenSearchException.java index dda3983fbb4d1..8f1f5c929d865 100644 --- 
a/libs/core/src/main/java/org/opensearch/OpenSearchException.java +++ b/libs/core/src/main/java/org/opensearch/OpenSearchException.java @@ -96,7 +96,7 @@ public class OpenSearchException extends RuntimeException implements Writeable, private static final String INDEX_METADATA_KEY = "opensearch.index"; private static final String INDEX_METADATA_KEY_UUID = "opensearch.index_uuid"; private static final String SHARD_METADATA_KEY = "opensearch.shard"; - private static final String OPENSEARCH_PREFIX_KEY = "opensearch."; + public static final String OPENSEARCH_PREFIX_KEY = "opensearch."; private static final String TYPE = "type"; private static final String REASON = "reason"; @@ -248,7 +248,10 @@ public List getMetadata(String key) { return metadata.get(key); } - protected Map> getMetadata() { + /** + * Returns the map of metadata keys and values. + */ + public Map> getMetadata() { return metadata; } @@ -288,7 +291,10 @@ public List getHeader(String key) { return headers.get(key); } - protected Map> getHeaders() { + /** + * Returns the map of header keys and values. + */ + public Map> getHeaders() { return headers; } diff --git a/libs/core/src/main/java/org/opensearch/core/common/ParsingException.java b/libs/core/src/main/java/org/opensearch/core/common/ParsingException.java index b6dc7dc928b3e..732a5bf9f3ff0 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/ParsingException.java +++ b/libs/core/src/main/java/org/opensearch/core/common/ParsingException.java @@ -51,7 +51,7 @@ */ public class ParsingException extends OpenSearchException { - protected static final int UNKNOWN_POSITION = -1; + public static final int UNKNOWN_POSITION = -1; private final int lineNumber; private final int columnNumber; diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle index 5c6bc8efe1098..2db49c237b75f 100644 --- a/plugins/transport-grpc/build.gradle +++ b/plugins/transport-grpc/build.gradle @@ -8,9 +8,11 @@ import org.gradle.api.attributes.java.TargetJvmEnvironment * compatible open source license. */ +apply plugin: 'opensearch.internal-cluster-test' + opensearchplugin { description = 'gRPC based transport implementation' - classname = 'org.opensearch.transport.grpc.GrpcPlugin' + classname = 'org.opensearch.plugin.transport.grpc.GrpcPlugin' } dependencies { @@ -27,6 +29,7 @@ dependencies { implementation "io.grpc:grpc-stub:${versions.grpc}" implementation "io.grpc:grpc-util:${versions.grpc}" implementation "io.perfmark:perfmark-api:0.26.0" + implementation "org.opensearch:protobufs:0.1.0" } tasks.named("dependencyLicenses").configure { diff --git a/plugins/transport-grpc/licenses/protobufs-0.1.0.jar.sha1 b/plugins/transport-grpc/licenses/protobufs-0.1.0.jar.sha1 new file mode 100644 index 0000000000000..8eec0266319df --- /dev/null +++ b/plugins/transport-grpc/licenses/protobufs-0.1.0.jar.sha1 @@ -0,0 +1 @@ +48ba4377d529cf71ec7d8716e155b928118325d5 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/protobufs-LICENSE.txt b/plugins/transport-grpc/licenses/protobufs-LICENSE.txt new file mode 100644 index 0000000000000..44cbce8f123a1 --- /dev/null +++ b/plugins/transport-grpc/licenses/protobufs-LICENSE.txt @@ -0,0 +1,475 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from unicode conversion examples available at +http://www.unicode.org/Public/PROGRAMS/CVTUTF. Here is the copyright +from those sources: + +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + + +Some code in core/src/java/org/apache/lucene/util/ArrayUtil.java was +derived from Python 2.4.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/2.4.2/license/ + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from Python 3.1.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/3.1.2/license/ + +Some code in core/src/java/org/apache/lucene/util/automaton was +derived from Brics automaton sources available at +www.brics.dk/automaton/. Here is the copyright from those sources: + +/* + * Copyright (c) 2001-2009 Anders Moeller + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +The levenshtein automata tables in core/src/java/org/apache/lucene/util/automaton +were automatically generated with the moman/finenight FSA package. 
+Here is the copyright for those sources: + +# Copyright (c) 2010, Jean-Philippe Barrette-LaPierre, +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from ICU (http://www.icu-project.org) +The full license is available here: + http://source.icu-project.org/repos/icu/icu/trunk/license.html + +/* + * Copyright (C) 1999-2010, International Business Machines + * Corporation and others. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, and/or sell copies of the + * Software, and to permit persons to whom the Software is furnished to do so, + * provided that the above copyright notice(s) and this permission notice appear + * in all copies of the Software and that both the above copyright notice(s) and + * this permission notice appear in supporting documentation. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE + * LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR + * ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER + * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Except as contained in this notice, the name of a copyright holder shall not + * be used in advertising or otherwise to promote the sale, use or other + * dealings in this Software without prior written authorization of the + * copyright holder. + */ + +The following license applies to the Snowball stemmers: + +Copyright (c) 2001, Dr Martin Porter +Copyright (c) 2002, Richard Boulton +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The following license applies to the KStemmer: + +Copyright © 2003, +Center for Intelligent Information Retrieval, +University of Massachusetts, Amherst. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. The names "Center for Intelligent Information Retrieval" and +"University of Massachusetts" must not be used to endorse or promote products +derived from this software without prior written permission. To obtain +permission, contact info@ciir.cs.umass.edu. + +THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +The following license applies to the Morfologik project: + +Copyright (c) 2006 Dawid Weiss +Copyright (c) 2007-2011 Dawid Weiss, Marcin Miłkowski +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Morfologik nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--- + +The dictionary comes from Morfologik project. Morfologik uses data from +Polish ispell/myspell dictionary hosted at http://www.sjp.pl/slownik/en/ and +is licenced on the terms of (inter alia) LGPL and Creative Commons +ShareAlike. The part-of-speech tags were added in Morfologik project and +are not found in the data from sjp.pl. The tagset is similar to IPI PAN +tagset. + +--- + +The following license applies to the Morfeusz project, +used by org.apache.lucene.analysis.morfologik. + +BSD-licensed dictionary of Polish (SGJP) +http://sgjp.pl/morfeusz/ + +Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński, + Marcin Woliński, Robert Wołosz + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/plugins/transport-grpc/licenses/protobufs-NOTICE.txt b/plugins/transport-grpc/licenses/protobufs-NOTICE.txt new file mode 100644 index 0000000000000..44cbce8f123a1 --- /dev/null +++ b/plugins/transport-grpc/licenses/protobufs-NOTICE.txt @@ -0,0 +1,475 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from unicode conversion examples available at +http://www.unicode.org/Public/PROGRAMS/CVTUTF. Here is the copyright +from those sources: + +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + + +Some code in core/src/java/org/apache/lucene/util/ArrayUtil.java was +derived from Python 2.4.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/2.4.2/license/ + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from Python 3.1.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/3.1.2/license/ + +Some code in core/src/java/org/apache/lucene/util/automaton was +derived from Brics automaton sources available at +www.brics.dk/automaton/. Here is the copyright from those sources: + +/* + * Copyright (c) 2001-2009 Anders Moeller + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +The levenshtein automata tables in core/src/java/org/apache/lucene/util/automaton +were automatically generated with the moman/finenight FSA package. 
+Here is the copyright for those sources: + +# Copyright (c) 2010, Jean-Philippe Barrette-LaPierre, +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from ICU (http://www.icu-project.org) +The full license is available here: + http://source.icu-project.org/repos/icu/icu/trunk/license.html + +/* + * Copyright (C) 1999-2010, International Business Machines + * Corporation and others. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, and/or sell copies of the + * Software, and to permit persons to whom the Software is furnished to do so, + * provided that the above copyright notice(s) and this permission notice appear + * in all copies of the Software and that both the above copyright notice(s) and + * this permission notice appear in supporting documentation. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE + * LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR + * ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER + * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Except as contained in this notice, the name of a copyright holder shall not + * be used in advertising or otherwise to promote the sale, use or other + * dealings in this Software without prior written authorization of the + * copyright holder. + */ + +The following license applies to the Snowball stemmers: + +Copyright (c) 2001, Dr Martin Porter +Copyright (c) 2002, Richard Boulton +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The following license applies to the KStemmer: + +Copyright © 2003, +Center for Intelligent Information Retrieval, +University of Massachusetts, Amherst. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. The names "Center for Intelligent Information Retrieval" and +"University of Massachusetts" must not be used to endorse or promote products +derived from this software without prior written permission. To obtain +permission, contact info@ciir.cs.umass.edu. + +THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +The following license applies to the Morfologik project: + +Copyright (c) 2006 Dawid Weiss +Copyright (c) 2007-2011 Dawid Weiss, Marcin Miłkowski +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Morfologik nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--- + +The dictionary comes from Morfologik project. Morfologik uses data from +Polish ispell/myspell dictionary hosted at http://www.sjp.pl/slownik/en/ and +is licenced on the terms of (inter alia) LGPL and Creative Commons +ShareAlike. The part-of-speech tags were added in Morfologik project and +are not found in the data from sjp.pl. The tagset is similar to IPI PAN +tagset. + +--- + +The following license applies to the Morfeusz project, +used by org.apache.lucene.analysis.morfologik. + +BSD-licensed dictionary of Polish (SGJP) +http://sgjp.pl/morfeusz/ + +Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński, + Marcin Woliński, Robert Wołosz + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/GrpcTransportIT.java b/plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/GrpcTransportIT.java new file mode 100644 index 0000000000000..a5e40c16b323e --- /dev/null +++ b/plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/GrpcTransportIT.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc; + +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.Collections; + +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORT; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2) +public class GrpcTransportIT extends OpenSearchIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(GrpcPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(SETTING_GRPC_PORT.getKey(), "0") + .put(AUX_TRANSPORT_TYPES_KEY, GRPC_TRANSPORT_SETTING_KEY) + .build(); + } + + public void testGrpcTransportStarted() { + // Verify that the gRPC transport is started on all nodes + for (String nodeName : internalCluster().getNodeNames()) { + Netty4GrpcServerTransport transport = internalCluster().getInstance(Netty4GrpcServerTransport.class, nodeName); + assertNotNull("gRPC transport should be started on node " + nodeName, transport); + + // Verify that the transport is bound to an address + TransportAddress[] boundAddresses = transport.boundAddress().boundAddresses(); + assertTrue("gRPC transport should be bound to at least one address", boundAddresses.length > 0); + + // Log the bound addresses for debugging + for (TransportAddress address : boundAddresses) { + InetSocketAddress inetAddress = address.address(); + logger.info("Node {} gRPC transport bound to {}", nodeName, NetworkAddress.format(inetAddress)); + } + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java new file mode 100644 index 0000000000000..257b252ca23d1 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java @@ -0,0 +1,164 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.plugin.transport.grpc; + +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.plugin.transport.grpc.services.DocumentServiceImpl; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.script.ScriptService; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.client.Client; +import org.opensearch.watcher.ResourceWatcherService; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import io.grpc.BindableService; + +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_HOST; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORT; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT; + +/** + * Main class for the gRPC plugin. + */ +public final class GrpcPlugin extends Plugin implements NetworkPlugin { + + private Client client; + + /** + * Creates a new GrpcPlugin instance. + */ + public GrpcPlugin() {} + + /** + * Provides auxiliary transports for the plugin. + * Creates and returns a map of transport names to transport suppliers. + * + * @param settings The node settings + * @param threadPool The thread pool + * @param circuitBreakerService The circuit breaker service + * @param networkService The network service + * @param clusterSettings The cluster settings + * @param tracer The tracer + * @return A map of transport names to transport suppliers + */ + @Override + public Map> getAuxTransports( + Settings settings, + ThreadPool threadPool, + CircuitBreakerService circuitBreakerService, + NetworkService networkService, + ClusterSettings clusterSettings, + Tracer tracer + ) { + if (client == null) { + throw new RuntimeException("client cannot be null"); + } + List grpcServices = registerGRPCServices(new DocumentServiceImpl(client)); + return Collections.singletonMap( + GRPC_TRANSPORT_SETTING_KEY, + () -> new Netty4GrpcServerTransport(settings, grpcServices, networkService) + ); + } + + /** + * Registers gRPC services to be exposed by the transport. + * + * @param services The gRPC services to register + * @return A list of registered bindable services + */ + protected List registerGRPCServices(BindableService... 
services) { + return List.of(services); + } + + /** + * Returns the settings defined by this plugin. + * + * @return A list of settings + */ + @Override + public List> getSettings() { + return List.of( + SETTING_GRPC_PORT, + SETTING_GRPC_HOST, + SETTING_GRPC_PUBLISH_HOST, + SETTING_GRPC_BIND_HOST, + SETTING_GRPC_WORKER_COUNT, + SETTING_GRPC_PUBLISH_PORT + ); + } + + /** + * Creates components used by the plugin. + * Stores the client for later use in creating gRPC services. + * + * @param client The client + * @param clusterService The cluster service + * @param threadPool The thread pool + * @param resourceWatcherService The resource watcher service + * @param scriptService The script service + * @param xContentRegistry The named content registry + * @param environment The environment + * @param nodeEnvironment The node environment + * @param namedWriteableRegistry The named writeable registry + * @param indexNameExpressionResolver The index name expression resolver + * @param repositoriesServiceSupplier The repositories service supplier + * @return A collection of components + */ + @Override + public Collection createComponents( + Client client, + ClusterService clusterService, + ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + Environment environment, + NodeEnvironment nodeEnvironment, + NamedWriteableRegistry namedWriteableRegistry, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier repositoriesServiceSupplier + ) { + this.client = client; + + return super.createComponents( + client, + clusterService, + threadPool, + resourceWatcherService, + scriptService, + xContentRegistry, + environment, + nodeEnvironment, + namedWriteableRegistry, + indexNameExpressionResolver, + repositoriesServiceSupplier + ); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransport.java similarity index 96% rename from plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java rename to plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransport.java index 1fb6a0bca03ea..622834401970e 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransport.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.transport.grpc; +package org.opensearch.plugin.transport.grpc; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -156,6 +156,10 @@ BoundTransportAddress boundAddress() { return this.boundAddress; } + /** + * Starts the gRPC server transport. + * Initializes the event loop group and binds the server to the configured addresses. + */ @Override protected void doStart() { boolean success = false; @@ -171,6 +175,10 @@ protected void doStart() { } } + /** + * Stops the gRPC server transport. + * Shuts down all running servers and the event loop group. + */ @Override protected void doStop() { for (Server server : servers) { @@ -196,6 +204,10 @@ protected void doStop() { } } + /** + * Closes the gRPC server transport. + * Performs any necessary cleanup after stopping the transport. 
+ */ @Override protected void doClose() { diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListener.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListener.java new file mode 100644 index 0000000000000..589e15d2db489 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListener.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.listeners; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.transport.grpc.proto.response.document.bulk.BulkResponseProtoUtils; + +import java.io.IOException; + +import io.grpc.stub.StreamObserver; + +/** + * Listener for bulk request execution completion, handling successful and failure scenarios. + */ +public class BulkRequestActionListener implements ActionListener { + private static final Logger logger = LogManager.getLogger(BulkRequestActionListener.class); + private StreamObserver responseObserver; + + /** + * Creates a new BulkRequestActionListener. + * + * @param responseObserver The gRPC stream observer to send the response back to the client + */ + public BulkRequestActionListener(StreamObserver responseObserver) { + super(); + this.responseObserver = responseObserver; + } + + /** + * Handles successful bulk request execution. + * Converts the OpenSearch internal response to protobuf format and sends it to the client. + * + * @param response The bulk response from OpenSearch + */ + @Override + public void onResponse(org.opensearch.action.bulk.BulkResponse response) { + // Bulk execution succeeded. Convert the opensearch internal response to protobuf + try { + org.opensearch.protobufs.BulkResponse protoResponse = BulkResponseProtoUtils.toProto(response); + responseObserver.onNext(protoResponse); + responseObserver.onCompleted(); + } catch (RuntimeException | IOException e) { + responseObserver.onError(e); + } + } + + /** + * Handles bulk request execution failures. + * Converts the exception to an appropriate gRPC error and sends it to the client. + * + * @param e The exception that occurred during execution + */ + @Override + public void onFailure(Exception e) { + logger.error("BulkRequestActionListener failed to process bulk request:" + e.getMessage()); + responseObserver.onError(e); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/package-info.java new file mode 100644 index 0000000000000..f10871a20236a --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Action listeners for the gRPC transport plugin. 
+ * This package contains listeners that handle responses from OpenSearch actions and convert them to gRPC responses. + */ +package org.opensearch.plugin.transport.grpc.listeners; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/package-info.java similarity index 87% rename from plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java rename to plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/package-info.java index 4a5d9d02b5b91..c847a49481218 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/package-info.java @@ -10,4 +10,4 @@ * gRPC transport implementation for OpenSearch. * Provides network communication using the gRPC protocol. */ -package org.opensearch.transport.grpc; +package org.opensearch.plugin.transport.grpc; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/package-info.java new file mode 100644 index 0000000000000..0a1cff67d46cb --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains Protocol Buffer conversion methods for OpenSearch objects. + * These methods aim to centralize all Protocol Buffer conversion logic in the transport-grpc module. + */ +package org.opensearch.plugin.transport.grpc.proto; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtils.java new file mode 100644 index 0000000000000..d24e62ed09d99 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtils.java @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.common; + +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.protobufs.SourceConfig; +import org.opensearch.protobufs.SourceConfigParam; +import org.opensearch.protobufs.SourceFilter; +import org.opensearch.rest.RestRequest; +import org.opensearch.search.fetch.subphase.FetchSourceContext; + +import java.util.ArrayList; +import java.util.List; + +/** + * Utility class for converting SourceConfig Protocol Buffers to FetchSourceContext objects. + * This class handles the conversion of Protocol Buffer representations to their + * corresponding OpenSearch objects. + */ +public class FetchSourceContextProtoUtils { + + private FetchSourceContextProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a SourceConfig Protocol Buffer to a FetchSourceContext object. 
+ * Similar to {@link FetchSourceContext#parseFromRestRequest(RestRequest)} + * + * @param request + * @return + */ + public static FetchSourceContext parseFromProtoRequest(org.opensearch.protobufs.BulkRequest request) { + Boolean fetchSource = true; + String[] sourceExcludes = null; + String[] sourceIncludes = null; + + // Set up source context if source parameters are provided + if (request.hasSource()) { + switch (request.getSource().getSourceConfigParamCase()) { + case SourceConfigParam.SourceConfigParamCase.BOOL_VALUE: + fetchSource = request.getSource().getBoolValue(); + break; + case SourceConfigParam.SourceConfigParamCase.STRING_ARRAY: + sourceIncludes = request.getSource().getStringArray().getStringArrayList().toArray(new String[0]); + break; + default: + throw new UnsupportedOperationException("Invalid sourceConfig provided."); + } + } + + if (request.getSourceIncludesList().size() > 0) { + sourceIncludes = request.getSourceIncludesList().toArray(new String[0]); + } + + if (request.getSourceExcludesList().size() > 0) { + sourceExcludes = request.getSourceExcludesList().toArray(new String[0]); + } + if (fetchSource != null || sourceIncludes != null || sourceExcludes != null) { + return new FetchSourceContext(fetchSource == null ? true : fetchSource, sourceIncludes, sourceExcludes); + } + return null; + } + + /** + * Converts a SourceConfig Protocol Buffer to a FetchSourceContext object. + * Similar to {@link FetchSourceContext#fromXContent(XContentParser)}. + * + * @param sourceConfig The SourceConfig Protocol Buffer to convert + * @return A FetchSourceContext object + */ + public static FetchSourceContext fromProto(SourceConfig sourceConfig) { + boolean fetchSource = true; + String[] includes = Strings.EMPTY_ARRAY; + String[] excludes = Strings.EMPTY_ARRAY; + if (sourceConfig.getSourceConfigCase() == SourceConfig.SourceConfigCase.FETCH) { + fetchSource = sourceConfig.getFetch(); + } else if (sourceConfig.hasIncludes()) { + ArrayList list = new ArrayList<>(); + for (String string : sourceConfig.getIncludes().getStringArrayList()) { + list.add(string); + } + includes = list.toArray(new String[0]); + } else if (sourceConfig.hasFilter()) { + SourceFilter sourceFilter = sourceConfig.getFilter(); + if (!sourceFilter.getIncludesList().isEmpty()) { + List includesList = new ArrayList<>(); + for (String s : sourceFilter.getIncludesList()) { + includesList.add(s); + } + includes = includesList.toArray(new String[0]); + } else if (!sourceFilter.getExcludesList().isEmpty()) { + List excludesList = new ArrayList<>(); + for (String s : sourceFilter.getExcludesList()) { + excludesList.add(s); + } + excludes = excludesList.toArray(new String[0]); + } + } + return new FetchSourceContext(fetchSource, includes, excludes); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java new file mode 100644 index 0000000000000..4fd8288fd8d63 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+package org.opensearch.plugin.transport.grpc.proto.request.common;
+
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.protobufs.ObjectMap;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Utility class for converting ObjectMap Protobuf type to a Java object.
+ */
+public class ObjectMapProtoUtils {
+
+    private ObjectMapProtoUtils() {
+        // Utility class, no instances
+    }
+
+    /**
+     * Converts an ObjectMap to Java POJO representation.
+     * Similar to {@link XContentParser#map()}
+     *
+     * @param objectMap The generic protobuf objectMap to convert
+     * @return A Java map representation of the protobuf ObjectMap
+     */
+    public static Map<String, Object> fromProto(ObjectMap objectMap) {
+
+        Map<String, Object> map = new HashMap<>();
+        for (Map.Entry<String, ObjectMap.Value> entry : objectMap.getFieldsMap().entrySet()) {
+            map.put(entry.getKey(), fromProto(entry.getValue()));
+            // TODO how to keep the type of the map values, instead of having them all as generic 'Object' types?
+        }
+
+        return map;
+    }
+
+    /**
+     * Converts an ObjectMap.Value to Java POJO representation.
+     * Similar to {@link XContentParser#map()}
+     *
+     * @param value The generic protobuf ObjectMap.Value to convert
+     * @return A Java object representation of the protobuf ObjectMap.Value
+     */
+    private static Object fromProto(ObjectMap.Value value) {
+        if (value.hasNullValue()) {
+            // Null
+            throw new UnsupportedOperationException("Cannot add null value in ObjectMap.value " + value.toString() + " to a Java map.");
+        } else if (value.hasDouble()) {
+            // Numbers
+            return value.getDouble();
+        } else if (value.hasFloat()) {
+            return value.getFloat();
+        } else if (value.hasInt32()) {
+            return value.getInt32();
+        } else if (value.hasInt64()) {
+            return value.getInt64();
+        } else if (value.hasString()) {
+            // String
+            return value.getString();
+        } else if (value.hasBool()) {
+            // Boolean
+            return value.getBool();
+        } else if (value.hasListValue()) {
+            // List
+            List<Object> list = new ArrayList<>();
+            for (ObjectMap.Value listEntry : value.getListValue().getValueList()) {
+                list.add(fromProto(listEntry));
+            }
+            return list;
+        } else if (value.hasObjectMap()) {
+            // Map
+            return fromProto(value.getObjectMap());
+        } else {
+            throw new IllegalArgumentException("Cannot convert " + value.toString() + " to protobuf Object.Value");
+        }
+    }
+}
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtils.java
new file mode 100644
index 0000000000000..408b548fa6fbf
--- /dev/null
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtils.java
@@ -0,0 +1,119 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.transport.grpc.proto.request.common;
+
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.protobufs.InlineScript;
+import org.opensearch.protobufs.ScriptLanguage;
+import org.opensearch.protobufs.StoredScriptId;
+import org.opensearch.script.Script;
+import org.opensearch.script.ScriptType;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.opensearch.script.Script.CONTENT_TYPE_OPTION;
+import static org.opensearch.script.Script.DEFAULT_SCRIPT_LANG;
+
+/**
+ * Utility class for converting Script Protocol Buffers to Script objects.
+ * This class handles the conversion of Protocol Buffer representations to their
+ * corresponding OpenSearch objects.
+ */
+public class ScriptProtoUtils {
+
+    private ScriptProtoUtils() {
+        // Utility class, no instances
+    }
+
+    /**
+     * Convenience method to call {@link ScriptProtoUtils#parseFromProtoRequest(org.opensearch.protobufs.Script, String)}
+     * Similar to {@link Script#parse(XContentParser)}
+     *
+     * @param script The protobuf Script to convert
+     * @return A Script object
+     */
+    public static Script parseFromProtoRequest(org.opensearch.protobufs.Script script) {
+        return parseFromProtoRequest(script, DEFAULT_SCRIPT_LANG);
+    }
+
+    /**
+     * Converts a Script Protocol Buffer to a Script object.
+     * Similar to {@link Script#parse(XContentParser, String)}, which internally calls Script#build().
+     */
+    private static Script parseFromProtoRequest(org.opensearch.protobufs.Script script, String defaultLang) {
+        Objects.requireNonNull(defaultLang);
+
+        if (script.hasInlineScript()) {
+            return parseInlineScript(script.getInlineScript(), defaultLang);
+        } else if (script.hasStoredScriptId()) {
+            return parseStoredScriptId(script.getStoredScriptId());
+        } else {
+            throw new UnsupportedOperationException("No valid script type detected");
+        }
+    }
+
+    /**
+     * Parses a protobuf InlineScript to a Script object
+     */
+    private static Script parseInlineScript(InlineScript inlineScript, String defaultLang) {
+
+        ScriptType type = ScriptType.INLINE;
+
+        String lang = parseScriptLanguage(inlineScript.getLang(), defaultLang);
+        String idOrCode = inlineScript.getSource();
+
+        Map<String, String> options = inlineScript.getOptionsMap();
+        if (options.size() > 1 || options.size() == 1 && options.get(CONTENT_TYPE_OPTION) == null) {
+            throw new IllegalArgumentException("illegal compiler options [" + options + "] specified");
+        }
+
+        Map<String, Object> params = inlineScript.hasParams()
+            ? ObjectMapProtoUtils.fromProto(inlineScript.getParams())
+            : Collections.emptyMap();
+
+        return new Script(type, lang, idOrCode, options, params);
+    }
+
+    /**
+     * Parses a protobuf StoredScriptId to a Script object
+     */
+    private static Script parseStoredScriptId(StoredScriptId storedScriptId) {
+        ScriptType type = ScriptType.STORED;
+        String lang = null;
+        String idOrCode = storedScriptId.getId();
+        Map<String, String> options = null;
+        Map<String, Object> params = storedScriptId.hasParams()
+            ?
ObjectMapProtoUtils.fromProto(storedScriptId.getParams()) + : Collections.emptyMap(); + + return new Script(type, lang, idOrCode, options, params); + } + + private static String parseScriptLanguage(ScriptLanguage language, String defaultLang) { + if (language.hasStringValue()) { + return language.getStringValue(); + } + switch (language.getBuiltinScriptLanguage()) { + case BUILTIN_SCRIPT_LANGUAGE_EXPRESSION: + return "expression"; + case BUILTIN_SCRIPT_LANGUAGE_JAVA: + return "java"; + case BUILTIN_SCRIPT_LANGUAGE_MUSTACHE: + return "mustache"; + case BUILTIN_SCRIPT_LANGUAGE_PAINLESS: + return "painless"; + case BUILTIN_SCRIPT_LANGUAGE_UNSPECIFIED: + default: + throw new UnsupportedOperationException("no language was specified"); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/package-info.java new file mode 100644 index 0000000000000..511fc3851c2f1 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Common utility classes for request handling in the gRPC transport plugin. + * This package contains utilities for converting Protocol Buffer common request elements to OpenSearch internal requests. + */ +package org.opensearch.plugin.transport.grpc.proto.request.common; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java new file mode 100644 index 0000000000000..56bc53ccbc422 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; + +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.protobufs.BulkRequest; +import org.opensearch.protobufs.WaitForActiveShards; + +/** + * Handler for bulk requests in gRPC. + */ +public class ActiveShardCountProtoUtils { + // protected final Settings settings; + + /** + * Private constructor to prevent instantiation of utility class. + */ + protected ActiveShardCountProtoUtils() { + // Utility class, no instances + } + + /** + * Sets the active shard count on the bulk request based on the protobuf request. 
+ * Similar to {@link ActiveShardCount#parseString(String)} + * + * @param bulkRequest The bulk request to modify + * @param request The protobuf request containing the active shard count + * @return The modified bulk request + */ + public static org.opensearch.action.bulk.BulkRequest getActiveShardCount( + org.opensearch.action.bulk.BulkRequest bulkRequest, + BulkRequest request + ) { + if (!request.hasWaitForActiveShards()) { + return bulkRequest; + } + WaitForActiveShards waitForActiveShards = request.getWaitForActiveShards(); + switch (waitForActiveShards.getWaitForActiveShardsCase()) { + case WaitForActiveShards.WaitForActiveShardsCase.WAIT_FOR_ACTIVE_SHARD_OPTIONS: + switch (waitForActiveShards.getWaitForActiveShardOptions()) { + case WAIT_FOR_ACTIVE_SHARD_OPTIONS_UNSPECIFIED: + throw new UnsupportedOperationException("No mapping for WAIT_FOR_ACTIVE_SHARD_OPTIONS_UNSPECIFIED"); + case WAIT_FOR_ACTIVE_SHARD_OPTIONS_ALL: + bulkRequest.waitForActiveShards(ActiveShardCount.ALL); + break; + default: + bulkRequest.waitForActiveShards(ActiveShardCount.DEFAULT); + break; + } + break; + case WaitForActiveShards.WaitForActiveShardsCase.INT32_VALUE: + bulkRequest.waitForActiveShards(waitForActiveShards.getInt32Value()); + break; + default: + throw new UnsupportedOperationException("No mapping for WAIT_FOR_ACTIVE_SHARD_OPTIONS_UNSPECIFIED"); + } + return bulkRequest; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtils.java new file mode 100644 index 0000000000000..b78d7d305b868 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtils.java @@ -0,0 +1,514 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; + +import org.opensearch.action.DocWriteRequest; +import org.opensearch.action.bulk.BulkRequestParser; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.update.UpdateRequest; +import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.VersionType; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.plugin.transport.grpc.proto.request.common.FetchSourceContextProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.request.common.ScriptProtoUtils; +import org.opensearch.protobufs.BulkRequest; +import org.opensearch.protobufs.BulkRequestBody; +import org.opensearch.protobufs.CreateOperation; +import org.opensearch.protobufs.DeleteOperation; +import org.opensearch.protobufs.IndexOperation; +import org.opensearch.protobufs.UpdateOperation; +import org.opensearch.script.Script; +import org.opensearch.search.fetch.subphase.FetchSourceContext; + +import java.util.List; +import java.util.Objects; +import java.util.function.Consumer; + +import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; + +/** + * Parses bulk requests. + * + */ +public class BulkRequestParserProtoUtils { + // protected final Settings settings; + + /** + * Private constructor to prevent instantiation of utility class. + */ + protected BulkRequestParserProtoUtils() { + // Utility class, no instances + } + + /** + * Same as { BulkRequest#valueOrDefault(String, String) + * @param value + * @param globalDefault + * @return + */ + private static String valueOrDefault(String value, String globalDefault) { + if (Strings.isNullOrEmpty(value) && Strings.isNullOrEmpty(globalDefault) == false) { + return globalDefault; + } + return value; + } + + /** + * Same as { BulkRequest#valueOrDefault(Boolean, Boolean)} + * @param value + * @param globalDefault + * @return + */ + private static Boolean valueOrDefault(Boolean value, Boolean globalDefault) { + if (Objects.isNull(value) && !Objects.isNull(globalDefault)) { + return globalDefault; + } + return value; + } + + /** + * Similar to {@link BulkRequestParser#parse(BytesReference, String, String, FetchSourceContext, String, Boolean, boolean, MediaType, Consumer, Consumer, Consumer)}, except that it takes into account global values. 
+ * + * @param request + * @param defaultIndex + * @param defaultRouting + * @param defaultFetchSourceContext + * @param defaultPipeline + * @param defaultRequireAlias + * @return + */ + public static DocWriteRequest[] getDocWriteRequests( + BulkRequest request, + String defaultIndex, + String defaultRouting, + FetchSourceContext defaultFetchSourceContext, + String defaultPipeline, + Boolean defaultRequireAlias + ) { + List bulkRequestBodyList = request.getRequestBodyList(); + DocWriteRequest[] docWriteRequests = new DocWriteRequest[bulkRequestBodyList.size()]; + + // Process each operation in the request body + for (int i = 0; i < bulkRequestBodyList.size(); i++) { + BulkRequestBody bulkRequestBodyEntry = bulkRequestBodyList.get(i); + DocWriteRequest docWriteRequest; + + // Set default values, taking into account global values, similar to BulkRequest#add(BytesReference, ...., ) + String index = defaultIndex; + String id = null; + String routing = valueOrDefault(defaultRouting, request.getRouting()); + FetchSourceContext fetchSourceContext = defaultFetchSourceContext; + IndexOperation.OpType opType = null; + long version = Versions.MATCH_ANY; + VersionType versionType = VersionType.INTERNAL; + long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; + long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; + int retryOnConflict = 0; + String pipeline = valueOrDefault(defaultPipeline, request.getPipeline()); + Boolean requireAlias = valueOrDefault(defaultRequireAlias, request.getRequireAlias()); + + // Parse the operation type: create, index, update, delete, or none provided (which is invalid). + switch (bulkRequestBodyEntry.getOperationContainerCase()) { + case CREATE: + docWriteRequest = buildCreateRequest( + bulkRequestBodyEntry.getCreate(), + bulkRequestBodyEntry.getDoc().toByteArray(), + index, + id, + routing, + version, + versionType, + pipeline, + ifSeqNo, + ifPrimaryTerm, + requireAlias + ); + break; + case INDEX: + docWriteRequest = buildIndexRequest( + bulkRequestBodyEntry.getIndex(), + bulkRequestBodyEntry.getDoc().toByteArray(), + opType, + index, + id, + routing, + version, + versionType, + pipeline, + ifSeqNo, + ifPrimaryTerm, + requireAlias + ); + break; + case UPDATE: + docWriteRequest = buildUpdateRequest( + bulkRequestBodyEntry.getUpdate(), + bulkRequestBodyEntry.getDoc().toByteArray(), + bulkRequestBodyEntry, + index, + id, + routing, + fetchSourceContext, + retryOnConflict, + pipeline, + ifSeqNo, + ifPrimaryTerm, + requireAlias + ); + break; + case DELETE: + docWriteRequest = buildDeleteRequest( + bulkRequestBodyEntry.getDelete(), + index, + id, + routing, + version, + versionType, + ifSeqNo, + ifPrimaryTerm + ); + break; + case OPERATIONCONTAINER_NOT_SET: + default: + throw new IllegalArgumentException( + "Invalid BulkRequestBody. An OperationContainer (create, index, update, or delete) must be provided." + ); + } + // Add the request to the bulk request + docWriteRequests[i] = docWriteRequest; + } + return docWriteRequests; + } + + /** + * Builds an IndexRequest with create flag set to true from a CreateOperation protobuf message. 
+ * + * @param createOperation The create operation protobuf message + * @param document The document content as bytes + * @param index The default index name + * @param id The default document ID + * @param routing The default routing value + * @param version The default version + * @param versionType The default version type + * @param pipeline The default pipeline + * @param ifSeqNo The default sequence number for optimistic concurrency control + * @param ifPrimaryTerm The default primary term for optimistic concurrency control + * @param requireAlias Whether the index must be an alias + * @return The constructed IndexRequest + */ + public static IndexRequest buildCreateRequest( + CreateOperation createOperation, + byte[] document, + String index, + String id, + String routing, + long version, + VersionType versionType, + String pipeline, + long ifSeqNo, + long ifPrimaryTerm, + boolean requireAlias + ) { + index = createOperation.hasIndex() ? createOperation.getIndex() : index; + id = createOperation.hasId() ? createOperation.getId() : id; + routing = createOperation.hasRouting() ? createOperation.getRouting() : routing; + version = createOperation.hasVersion() ? createOperation.getVersion() : version; + if (createOperation.hasVersionType()) { + switch (createOperation.getVersionType()) { + case VERSION_TYPE_EXTERNAL: + versionType = VersionType.EXTERNAL; + break; + case VERSION_TYPE_EXTERNAL_GTE: + versionType = VersionType.EXTERNAL_GTE; + break; + default: + versionType = VersionType.INTERNAL; + break; + } + } + pipeline = createOperation.hasPipeline() ? createOperation.getPipeline() : pipeline; + ifSeqNo = createOperation.hasIfSeqNo() ? createOperation.getIfSeqNo() : ifSeqNo; + ifPrimaryTerm = createOperation.hasIfPrimaryTerm() ? createOperation.getIfPrimaryTerm() : ifPrimaryTerm; + requireAlias = createOperation.hasRequireAlias() ? createOperation.getRequireAlias() : requireAlias; + + IndexRequest indexRequest = new IndexRequest(index).id(id) + .routing(routing) + .version(version) + .versionType(versionType) + .create(true) + .setPipeline(pipeline) + .setIfSeqNo(ifSeqNo) + .setIfPrimaryTerm(ifPrimaryTerm) + .source(document, MediaTypeRegistry.JSON) + .setRequireAlias(requireAlias); + return indexRequest; + } + + /** + * Builds an IndexRequest from an IndexOperation protobuf message. + * + * @param indexOperation The index operation protobuf message + * @param document The document content as bytes + * @param opType The default operation type + * @param index The default index name + * @param id The default document ID + * @param routing The default routing value + * @param version The default version + * @param versionType The default version type + * @param pipeline The default pipeline + * @param ifSeqNo The default sequence number for optimistic concurrency control + * @param ifPrimaryTerm The default primary term for optimistic concurrency control + * @param requireAlias Whether the index must be an alias + * @return The constructed IndexRequest + */ + public static IndexRequest buildIndexRequest( + IndexOperation indexOperation, + byte[] document, + IndexOperation.OpType opType, + String index, + String id, + String routing, + long version, + VersionType versionType, + String pipeline, + long ifSeqNo, + long ifPrimaryTerm, + boolean requireAlias + ) { + opType = indexOperation.hasOpType() ? indexOperation.getOpType() : opType; + index = indexOperation.hasIndex() ? indexOperation.getIndex() : index; + id = indexOperation.hasId() ? 
indexOperation.getId() : id; + routing = indexOperation.hasRouting() ? indexOperation.getRouting() : routing; + version = indexOperation.hasVersion() ? indexOperation.getVersion() : version; + if (indexOperation.hasVersionType()) { + switch (indexOperation.getVersionType()) { + case VERSION_TYPE_EXTERNAL: + versionType = VersionType.EXTERNAL; + break; + case VERSION_TYPE_EXTERNAL_GTE: + versionType = VersionType.EXTERNAL_GTE; + break; + default: + versionType = VersionType.INTERNAL; + break; + } + } + pipeline = indexOperation.hasPipeline() ? indexOperation.getPipeline() : pipeline; + ifSeqNo = indexOperation.hasIfSeqNo() ? indexOperation.getIfSeqNo() : ifSeqNo; + ifPrimaryTerm = indexOperation.hasIfPrimaryTerm() ? indexOperation.getIfPrimaryTerm() : ifPrimaryTerm; + requireAlias = indexOperation.hasRequireAlias() ? indexOperation.getRequireAlias() : requireAlias; + + IndexRequest indexRequest; + if (opType == null) { + indexRequest = new IndexRequest(index).id(id) + .routing(routing) + .version(version) + .versionType(versionType) + .setPipeline(pipeline) + .setIfSeqNo(ifSeqNo) + .setIfPrimaryTerm(ifPrimaryTerm) + .source(document, MediaTypeRegistry.JSON) + .setRequireAlias(requireAlias); + } else { + indexRequest = new IndexRequest(index).id(id) + .routing(routing) + .version(version) + .versionType(versionType) + .create(opType.equals(IndexOperation.OpType.OP_TYPE_CREATE)) + .setPipeline(pipeline) + .setIfSeqNo(ifSeqNo) + .setIfPrimaryTerm(ifPrimaryTerm) + .source(document, MediaTypeRegistry.JSON) + .setRequireAlias(requireAlias); + } + return indexRequest; + } + + /** + * Builds an UpdateRequest from an UpdateOperation protobuf message. + * + * @param updateOperation The update operation protobuf message + * @param document The document content as bytes + * @param bulkRequestBody The bulk request body containing additional update options + * @param index The default index name + * @param id The default document ID + * @param routing The default routing value + * @param fetchSourceContext The default fetch source context + * @param retryOnConflict The default number of retries on conflict + * @param pipeline The default pipeline + * @param ifSeqNo The default sequence number for optimistic concurrency control + * @param ifPrimaryTerm The default primary term for optimistic concurrency control + * @param requireAlias Whether the index must be an alias + * @return The constructed UpdateRequest + */ + public static UpdateRequest buildUpdateRequest( + UpdateOperation updateOperation, + byte[] document, + BulkRequestBody bulkRequestBody, + String index, + String id, + String routing, + FetchSourceContext fetchSourceContext, + int retryOnConflict, + String pipeline, + long ifSeqNo, + long ifPrimaryTerm, + boolean requireAlias + ) { + index = updateOperation.hasIndex() ? updateOperation.getIndex() : index; + id = updateOperation.hasId() ? updateOperation.getId() : id; + routing = updateOperation.hasRouting() ? updateOperation.getRouting() : routing; + fetchSourceContext = bulkRequestBody.hasSource() + ? FetchSourceContextProtoUtils.fromProto(bulkRequestBody.getSource()) + : fetchSourceContext; + retryOnConflict = updateOperation.hasRetryOnConflict() ? updateOperation.getRetryOnConflict() : retryOnConflict; + ifSeqNo = updateOperation.hasIfSeqNo() ? updateOperation.getIfSeqNo() : ifSeqNo; + ifPrimaryTerm = updateOperation.hasIfPrimaryTerm() ? updateOperation.getIfPrimaryTerm() : ifPrimaryTerm; + requireAlias = updateOperation.hasRequireAlias() ? 
updateOperation.getRequireAlias() : requireAlias; + + UpdateRequest updateRequest = new UpdateRequest().index(index) + .id(id) + .routing(routing) + .retryOnConflict(retryOnConflict) + .setIfSeqNo(ifSeqNo) + .setIfPrimaryTerm(ifPrimaryTerm) + .setRequireAlias(requireAlias) + .routing(routing); + + updateRequest = fromProto(updateRequest, document, bulkRequestBody, updateOperation); + + if (fetchSourceContext != null) { + updateRequest.fetchSource(fetchSourceContext); + } + // TODO: how is upsertRequest used? + // IndexRequest upsertRequest = updateRequest.upsertRequest(); + // if (upsertRequest != null) { + // upsertRequest.setPipeline(pipeline); + // } + + return updateRequest; + } + + /** + * Populates an UpdateRequest with values from protobuf messages. + * Similar to {@link UpdateRequest#fromXContent(XContentParser)} + * + * @param updateRequest The update request to populate + * @param document The document content as bytes + * @param bulkRequestBody The bulk request body containing update options + * @param updateOperation The update operation protobuf message + * @return The populated UpdateRequest + */ + public static UpdateRequest fromProto( + UpdateRequest updateRequest, + byte[] document, + BulkRequestBody bulkRequestBody, + UpdateOperation updateOperation + ) { + if (bulkRequestBody.hasScript()) { + Script script = ScriptProtoUtils.parseFromProtoRequest(bulkRequestBody.getScript()); + updateRequest.script(script); + } + + if (bulkRequestBody.hasScriptedUpsert()) { + updateRequest.scriptedUpsert(bulkRequestBody.getScriptedUpsert()); + } + + if (bulkRequestBody.hasUpsert()) { + updateRequest.upsert(bulkRequestBody.getUpsert(), MediaTypeRegistry.JSON); + } + + updateRequest.doc(document, MediaTypeRegistry.JSON); + + if (bulkRequestBody.hasDocAsUpsert()) { + updateRequest.docAsUpsert(bulkRequestBody.getDocAsUpsert()); + } + + if (bulkRequestBody.hasDetectNoop()) { + updateRequest.detectNoop(bulkRequestBody.getDetectNoop()); + } + + if (bulkRequestBody.hasDocAsUpsert()) { + updateRequest.docAsUpsert(bulkRequestBody.getDocAsUpsert()); + } + + if (bulkRequestBody.hasSource()) { + updateRequest.fetchSource(FetchSourceContextProtoUtils.fromProto(bulkRequestBody.getSource())); + } + + if (updateOperation.hasIfSeqNo()) { + updateRequest.setIfSeqNo(updateOperation.getIfSeqNo()); + } + + if (updateOperation.hasIfPrimaryTerm()) { + updateRequest.setIfPrimaryTerm(updateOperation.getIfPrimaryTerm()); + } + + return updateRequest; + } + + /** + * Builds a DeleteRequest from a DeleteOperation protobuf message. + * + * @param deleteOperation The delete operation protobuf message + * @param index The default index name + * @param id The default document ID + * @param routing The default routing value + * @param version The default version + * @param versionType The default version type + * @param ifSeqNo The default sequence number for optimistic concurrency control + * @param ifPrimaryTerm The default primary term for optimistic concurrency control + * @return The constructed DeleteRequest + */ + public static DeleteRequest buildDeleteRequest( + DeleteOperation deleteOperation, + String index, + String id, + String routing, + long version, + VersionType versionType, + long ifSeqNo, + long ifPrimaryTerm + ) { + index = deleteOperation.hasIndex() ? deleteOperation.getIndex() : index; + id = deleteOperation.hasId() ? deleteOperation.getId() : id; + routing = deleteOperation.hasRouting() ? deleteOperation.getRouting() : routing; + version = deleteOperation.hasVersion() ? 
deleteOperation.getVersion() : version; + if (deleteOperation.hasVersionType()) { + switch (deleteOperation.getVersionType()) { + case VERSION_TYPE_EXTERNAL: + versionType = VersionType.EXTERNAL; + break; + case VERSION_TYPE_EXTERNAL_GTE: + versionType = VersionType.EXTERNAL_GTE; + break; + default: + versionType = VersionType.INTERNAL; + break; + } + } + ifSeqNo = deleteOperation.hasIfSeqNo() ? deleteOperation.getIfSeqNo() : ifSeqNo; + ifPrimaryTerm = deleteOperation.hasIfPrimaryTerm() ? deleteOperation.getIfPrimaryTerm() : ifPrimaryTerm; + + DeleteRequest deleteRequest = new DeleteRequest(index).id(id) + .routing(routing) + .version(version) + .versionType(versionType) + .setIfSeqNo(ifSeqNo) + .setIfPrimaryTerm(ifPrimaryTerm); + + return deleteRequest; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtils.java new file mode 100644 index 0000000000000..d3e3824dde3dd --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtils.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; + +import org.opensearch.action.bulk.BulkShardRequest; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.plugin.transport.grpc.proto.request.common.FetchSourceContextProtoUtils; +import org.opensearch.protobufs.BulkRequest; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.document.RestBulkAction; +import org.opensearch.search.fetch.subphase.FetchSourceContext; +import org.opensearch.transport.client.Requests; +import org.opensearch.transport.client.node.NodeClient; + +/** + * Handler for bulk requests in gRPC. + */ +public class BulkRequestProtoUtils { + // protected final Settings settings; + + /** + * Private constructor to prevent instantiation of utility class. + */ + protected BulkRequestProtoUtils() { + // Utility class, no instances + } + + /** + * Prepare the request for execution. + * Similar to {@link RestBulkAction#prepareRequest(RestRequest, NodeClient)} ()} + * Please ensure to keep both implementations consistent. + * + * @param request the request to execute + * @return a future of the bulk action that was executed + */ + public static org.opensearch.action.bulk.BulkRequest prepareRequest(BulkRequest request) { + org.opensearch.action.bulk.BulkRequest bulkRequest = Requests.bulkRequest(); + + String defaultIndex = request.hasIndex() ? request.getIndex() : null; + String defaultRouting = request.hasRouting() ? request.getRouting() : null; + FetchSourceContext defaultFetchSourceContext = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + String defaultPipeline = request.hasPipeline() ? request.getPipeline() : null; + + bulkRequest = ActiveShardCountProtoUtils.getActiveShardCount(bulkRequest, request); + + Boolean defaultRequireAlias = request.hasRequireAlias() ? 
request.getRequireAlias() : null; + + if (request.hasTimeout()) { + bulkRequest.timeout(request.getTimeout()); + } else { + bulkRequest.timeout(BulkShardRequest.DEFAULT_TIMEOUT); + } + + bulkRequest.setRefreshPolicy(getRefreshPolicy(request)); + + // Note: batch_size is deprecated in OS 3.x. Add batch_size parameter when backporting to OS 2.x + /* + if (request.hasBatchSize()){ + logger.info("The batch size option in bulk API is deprecated and will be removed in 3.0."); + } + bulkRequest.batchSize(request.hasBatchSize() ? request.getBatchSize() : Integer.MAX_VALUE); + */ + + bulkRequest.add( + BulkRequestParserProtoUtils.getDocWriteRequests( + request, + defaultIndex, + defaultRouting, + defaultFetchSourceContext, + defaultPipeline, + defaultRequireAlias + ) + ); + + return bulkRequest; + } + + /** + * Extracts the refresh policy from the bulk request. + * + * @param request The bulk request containing the refresh policy + * @return The refresh policy as a string, or null if not specified + */ + public static String getRefreshPolicy(org.opensearch.protobufs.BulkRequest request) { + if (!request.hasRefresh()) { + return null; + } + switch (request.getRefresh()) { + case REFRESH_TRUE: + return WriteRequest.RefreshPolicy.IMMEDIATE.getValue(); + case REFRESH_WAIT_FOR: + return WriteRequest.RefreshPolicy.WAIT_UNTIL.getValue(); + case REFRESH_FALSE: + case REFRESH_UNSPECIFIED: + default: + return WriteRequest.RefreshPolicy.NONE.getValue(); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/package-info.java new file mode 100644 index 0000000000000..c29c353496e27 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Utility classes for handling document bulk requests in the gRPC transport plugin. + * This package contains utilities for converting Protocol Buffer bulk requests to OpenSearch internal requests. + */ +package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/ObjectMapProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/ObjectMapProtoUtils.java new file mode 100644 index 0000000000000..ec7e6850bba72 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/ObjectMapProtoUtils.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.common; + +import org.opensearch.protobufs.NullValue; +import org.opensearch.protobufs.ObjectMap; + +import java.util.List; +import java.util.Map; + +/** + * Utility class for converting generic Java objects to google.protobuf.Struct Protobuf type. 
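+ * <p>
+ * A hedged usage sketch (illustrative values; as implemented below, the conversion returns an
+ * {@code ObjectMap.Value} message):
+ * <pre>{@code
+ * ObjectMap.Value value = ObjectMapProtoUtils.toProto(Map.of("line", 3, "col", 10));
+ * // value.getObjectMap() carries "line" and "col" as int32 values
+ * }</pre>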
+ */ +public class ObjectMapProtoUtils { + + private ObjectMapProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a generic Java Object to its Protocol Buffer representation. + * + * @param javaObject The java object to convert + * @return A Protobuf builder .google.protobuf.Struct representation + */ + public static ObjectMap.Value toProto(Object javaObject) { + ObjectMap.Value.Builder valueBuilder = ObjectMap.Value.newBuilder(); + + if (javaObject == null) { + // Null + valueBuilder.setNullValue(NullValue.NULL_VALUE_NULL); + } + // TODO does the order we set int, long, double, float, matter? + else if (javaObject instanceof Integer) { + // Integer + valueBuilder.setInt32((int) javaObject); + } else if (javaObject instanceof Long) { + // Long + valueBuilder.setInt64((long) javaObject); + } else if (javaObject instanceof Double) { + // Double + valueBuilder.setDouble((double) javaObject); + } else if (javaObject instanceof Float) { + // Float + valueBuilder.setFloat((float) javaObject); + } else if (javaObject instanceof String) { + // String + valueBuilder.setString((String) javaObject); + } else if (javaObject instanceof Boolean) { + // Boolean + valueBuilder.setBool((Boolean) javaObject); + } else if (javaObject instanceof Enum) { + // Enum + valueBuilder.setString(javaObject.toString()); + } else if (javaObject instanceof List) { + // List + ObjectMap.ListValue.Builder listBuilder = ObjectMap.ListValue.newBuilder(); + for (Object listEntry : (List) javaObject) { + listBuilder.addValue(toProto(listEntry)); + } + valueBuilder.setListValue(listBuilder.build()); + } else if (javaObject instanceof Map) { + // Map + ObjectMap.Builder objectMapBuilder = ObjectMap.newBuilder(); + + @SuppressWarnings("unchecked") + Map fieldMap = (Map) javaObject; + for (Map.Entry entry : fieldMap.entrySet()) { + objectMapBuilder.putFields(entry.getKey(), toProto(entry.getValue())); + } + valueBuilder.setObjectMap(objectMapBuilder.build()); + } else { + throw new IllegalArgumentException("Cannot convert " + javaObject.toString() + " to google.protobuf.Struct"); + } + + return valueBuilder.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtils.java new file mode 100644 index 0000000000000..69720ae86c43f --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtils.java @@ -0,0 +1,264 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.plugin.transport.grpc.proto.response.common; + +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.common.breaker.ResponseLimitBreachedException; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.CircuitBreakingExceptionProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.FailedNodeExceptionProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.ParsingExceptionProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.ResponseLimitBreachedExceptionProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.ScriptExceptionProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.SearchParseExceptionProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.SearchPhaseExecutionExceptionProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.TooManyBucketsExceptionProtoUtils; +import org.opensearch.protobufs.ErrorCause; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.protobufs.StringArray; +import org.opensearch.protobufs.StringOrStringArray; +import org.opensearch.script.ScriptException; +import org.opensearch.search.SearchParseException; +import org.opensearch.search.aggregations.MultiBucketConsumerService; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static org.opensearch.OpenSearchException.OPENSEARCH_PREFIX_KEY; +import static org.opensearch.OpenSearchException.getExceptionName; + +/** + * Utility class for converting Exception objects to Protocol Buffers. + * This class handles the conversion of OpenSearchException and other Throwable instances + * to their Protocol Buffer representation. + */ +public class OpenSearchExceptionProtoUtils { + + private OpenSearchExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts an OpenSearchException to its Protocol Buffer representation. + * This method is equivalent to the {@link OpenSearchException#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param exception The OpenSearchException to convert + * @return A Protocol Buffer ErrorCause representation + * @throws IOException if there's an error during conversion + */ + public static ErrorCause toProto(OpenSearchException exception) throws IOException { + Throwable ex = ExceptionsHelper.unwrapCause(exception); + if (ex != exception) { + return generateThrowableProto(ex); + } else { + return innerToProto( + exception, + getExceptionName(exception), + exception.getMessage(), + exception.getHeaders(), + exception.getMetadata(), + exception.getCause() + ); + } + } + + /** + * Static helper method that renders {@link OpenSearchException} or {@link Throwable} instances + * as Protocol Buffers. + *
+ * This method is usually used when the {@link Throwable} is rendered as a part of another Protocol Buffer object. + * It is equivalent to the {@link OpenSearchException#generateThrowableXContent(XContentBuilder, ToXContent.Params, Throwable)} + * + * @param t The throwable to convert + * @return A Protocol Buffer ErrorCause representation + * @throws IOException if there's an error during conversion + */ + public static ErrorCause generateThrowableProto(Throwable t) throws IOException { + t = ExceptionsHelper.unwrapCause(t); + + if (t instanceof OpenSearchException) { + return toProto((OpenSearchException) t); + } else { + return innerToProto(t, getExceptionName(t), t.getMessage(), emptyMap(), emptyMap(), t.getCause()); + } + } + + /** + * Inner helper method for converting a Throwable to its Protocol Buffer representation. + * This method is equivalent to the {@link OpenSearchException#innerToXContent(XContentBuilder, ToXContent.Params, Throwable, String, String, Map, Map, Throwable)}. + * + * @param throwable The throwable to convert + * @param type The exception type + * @param message The exception message + * @param headers The exception headers + * @param metadata The exception metadata + * @param cause The exception cause + * @return A Protocol Buffer ErrorCause representation + * @throws IOException if there's an error during conversion + */ + public static ErrorCause innerToProto( + Throwable throwable, + String type, + String message, + Map> headers, + Map> metadata, + Throwable cause + ) throws IOException { + ErrorCause.Builder errorCauseBuilder = ErrorCause.newBuilder(); + + // Set exception type + errorCauseBuilder.setType(type); + + // Set exception message (reason) + if (message != null) { + errorCauseBuilder.setReason(message); + } + + // Add custom metadata fields propogated by the child classes of OpenSearchException + for (Map.Entry> entry : metadata.entrySet()) { + Map.Entry protoEntry = headerToValueProto( + entry.getKey().substring(OPENSEARCH_PREFIX_KEY.length()), + entry.getValue() + ); + errorCauseBuilder.putMetadata(protoEntry.getKey(), protoEntry.getValue()); + } + + // Add metadata if the throwable is an OpenSearchException + if (throwable instanceof OpenSearchException) { + OpenSearchException exception = (OpenSearchException) throwable; + Map moreMetadata = metadataToProto(exception); + for (Map.Entry entry : moreMetadata.entrySet()) { + errorCauseBuilder.putMetadata(entry.getKey(), entry.getValue()); + } + } + + if (cause != null) { + errorCauseBuilder.setCausedBy(generateThrowableProto(cause)); + } + + if (headers.isEmpty() == false) { + for (Map.Entry> entry : headers.entrySet()) { + Map.Entry protoEntry = headerToProto(entry.getKey(), entry.getValue()); + errorCauseBuilder.putHeader(protoEntry.getKey(), protoEntry.getValue()); + } + } + + // Add stack trace + errorCauseBuilder.setStackTrace(ExceptionsHelper.stackTrace(throwable)); + + // Add suppressed exceptions + Throwable[] allSuppressed = throwable.getSuppressed(); + if (allSuppressed.length > 0) { + for (Throwable suppressed : allSuppressed) { + errorCauseBuilder.addSuppressed(generateThrowableProto(suppressed)); + } + } + + return errorCauseBuilder.build(); + } + + /** + * Converts a single entry of a {@code Map>} into a protobuf {@code } + * Similar to {@link OpenSearchException#headerToXContent(XContentBuilder, String, List)} + * + * @param key The key of the header entry + * @param values The list of values for the header entry + * @return A map entry containing the key and its corresponding 
StringOrStringArray value, or null if values is null or empty + * @throws IOException if there's an error during conversion + */ + public static Map.Entry headerToProto(String key, List values) throws IOException { + if (values != null && values.isEmpty() == false) { + if (values.size() == 1) { + return new AbstractMap.SimpleEntry( + key, + StringOrStringArray.newBuilder().setStringValue(values.get(0)).build() + ); + } else { + StringArray.Builder stringArrayBuilder = StringArray.newBuilder(); + for (String val : values) { + stringArrayBuilder.addStringArray(val); + } + StringOrStringArray stringOrStringArray = StringOrStringArray.newBuilder() + .setStringArray(stringArrayBuilder.build()) + .build(); + + return new AbstractMap.SimpleEntry(key, stringOrStringArray); + } + } + return null; + } + + /** + * Similar to {@link OpenSearchExceptionProtoUtils#headerToProto(String, List)}, + * but returns a {@code Map} instead. + * + * @param key The key of the header entry + * @param values The list of values for the header entry + * @return A map entry containing the key and its corresponding ObjectMap.Value, or null if values is null or empty + * @throws IOException if there's an error during conversion + */ + public static Map.Entry headerToValueProto(String key, List values) throws IOException { + if (values != null && values.isEmpty() == false) { + if (values.size() == 1) { + return new AbstractMap.SimpleEntry( + key, + ObjectMap.Value.newBuilder().setString(values.get(0)).build() + ); + } else { + ObjectMap.ListValue.Builder listValueBuilder = ObjectMap.ListValue.newBuilder(); + for (String val : values) { + listValueBuilder.addValue(ObjectMap.Value.newBuilder().setString(val).build()); + } + return new AbstractMap.SimpleEntry( + key, + ObjectMap.Value.newBuilder().setListValue(listValueBuilder).build() + ); + } + } + return null; + } + + /** + * This method is similar to {@link OpenSearchException#metadataToXContent(XContentBuilder, ToXContent.Params)} + * This method is overridden by various exception classes, which are hardcoded here. 
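+ * <p>
+ * Callers are expected to merge the returned entries into an {@code ErrorCause}, as
+ * {@code innerToProto} does above. A minimal sketch:
+ * <pre>{@code
+ * Map<String, ObjectMap.Value> metadata = OpenSearchExceptionProtoUtils.metadataToProto(exception);
+ * for (Map.Entry<String, ObjectMap.Value> entry : metadata.entrySet()) {
+ *     errorCauseBuilder.putMetadata(entry.getKey(), entry.getValue());
+ * }
+ * }</pre>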
+ * + * @param exception The OpenSearchException to convert metadata from + * @return A map containing the exception's metadata as ObjectMap.Value objects + */ + public static Map metadataToProto(OpenSearchException exception) { + if (exception instanceof CircuitBreakingException) { + return CircuitBreakingExceptionProtoUtils.metadataToProto((CircuitBreakingException) exception); + } else if (exception instanceof FailedNodeException) { + return FailedNodeExceptionProtoUtils.metadataToProto((FailedNodeException) exception); + } else if (exception instanceof ParsingException) { + return ParsingExceptionProtoUtils.metadataToProto((ParsingException) exception); + } else if (exception instanceof ResponseLimitBreachedException) { + return ResponseLimitBreachedExceptionProtoUtils.metadataToProto((ResponseLimitBreachedException) exception); + } else if (exception instanceof ScriptException) { + return ScriptExceptionProtoUtils.metadataToProto((ScriptException) exception); + } else if (exception instanceof SearchParseException) { + return SearchParseExceptionProtoUtils.metadataToProto((SearchParseException) exception); + } else if (exception instanceof SearchPhaseExecutionException) { + return SearchPhaseExecutionExceptionProtoUtils.metadataToProto((SearchPhaseExecutionException) exception); + } else if (exception instanceof MultiBucketConsumerService.TooManyBucketsException) { + return TooManyBucketsExceptionProtoUtils.metadataToProto((MultiBucketConsumerService.TooManyBucketsException) exception); + } else { + return new HashMap<>(); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/StructProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/StructProtoUtils.java new file mode 100644 index 0000000000000..d76a692617c66 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/StructProtoUtils.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.common; + +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; + +import java.util.List; +import java.util.Map; + +/** + * Utility class for converting generic Java objects to google.protobuf.Struct Protobuf type. + */ +public class StructProtoUtils { + + private StructProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a generic Java Object to its Protocol Buffer representation. 
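+ * <p>
+ * A small illustrative example (all numbers are widened to doubles, since the conversion relies on
+ * {@code Number#doubleValue()}):
+ * <pre>{@code
+ * Value value = StructProtoUtils.toProto(Map.of("took", 5, "errors", false));
+ * // value.getStructValue() holds "took" as number_value 5.0 and "errors" as bool_value false
+ * }</pre>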
+ * + * @param javaObject The java object to convert + * @return A Protobuf builder .google.protobuf.Struct representation + */ + public static Value toProto(Object javaObject) { + Value.Builder valueBuilder = Value.newBuilder(); + + if (javaObject == null) { + // Null + valueBuilder.setNullValue(NullValue.NULL_VALUE); + } else if (javaObject instanceof Number) { + // Number - use doubleValue() to handle all numeric types + valueBuilder.setNumberValue(((Number) javaObject).doubleValue()); + } else if (javaObject instanceof String) { + // String + valueBuilder.setStringValue((String) javaObject); + } else if (javaObject instanceof Boolean) { + // Boolean + valueBuilder.setBoolValue((Boolean) javaObject); + } else if (javaObject instanceof List) { + // List + ListValue.Builder listBuilder = ListValue.newBuilder(); + for (Object listEntry : (List) javaObject) { + listBuilder.addValues(toProto(listEntry)); + } + valueBuilder.setListValue(listBuilder.build()); + } else if (javaObject instanceof Map) { + // Map + + Struct.Builder structBuilder = Struct.newBuilder(); + + @SuppressWarnings("unchecked") + Map fieldMap = (Map) javaObject; + for (Map.Entry entry : fieldMap.entrySet()) { + structBuilder.putFields(entry.getKey(), toProto(entry.getValue())); + } + valueBuilder.setStructValue(structBuilder.build()); + } else { + throw new IllegalArgumentException("Cannot convert " + javaObject.toString() + " to google.protobuf.Struct"); + } + + return valueBuilder.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/package-info.java new file mode 100644 index 0000000000000..831b220393b85 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Common utility classes for response handling in the gRPC transport plugin. + * This package contains utilities for converting common response elements to Protocol Buffers. + */ +package org.opensearch.plugin.transport.grpc.proto.response.common; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtils.java new file mode 100644 index 0000000000000..16a611e5b3113 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtils.java @@ -0,0 +1,106 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.plugin.transport.grpc.proto.response.document.bulk; + +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.bulk.BulkItemResponse; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.get.GetResult; +import org.opensearch.plugin.transport.grpc.proto.response.common.OpenSearchExceptionProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.document.common.DocWriteResponseProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.document.get.GetResultProtoUtils; +import org.opensearch.protobufs.ErrorCause; +import org.opensearch.protobufs.Item; +import org.opensearch.protobufs.NullValue; +import org.opensearch.protobufs.ResponseItem; + +import java.io.IOException; + +/** + * Utility class for converting BulkItemResponse objects to Protocol Buffers. + * This class handles the conversion of individual bulk operation responses to their + * Protocol Buffer representation. + */ +public class BulkItemResponseProtoUtils { + + private BulkItemResponseProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a BulkItemResponse to its Protocol Buffer representation. + * This method is equivalent to the {@link BulkItemResponse#toXContent(XContentBuilder, ToXContent.Params)} + * + * + * @param response The BulkItemResponse to convert + * @return A Protocol Buffer Item representation + * @throws IOException if there's an error during conversion + * + */ + public static Item toProto(BulkItemResponse response) throws IOException { + Item.Builder itemBuilder = Item.newBuilder(); + + ResponseItem.Builder responseItemBuilder; + if (response.isFailed() == false) { + DocWriteResponse docResponse = response.getResponse(); + responseItemBuilder = DocWriteResponseProtoUtils.toProto(docResponse); + + // TODO set the GRPC status instead of HTTP Status + responseItemBuilder.setStatus(docResponse.status().getStatus()); + } else { + BulkItemResponse.Failure failure = response.getFailure(); + responseItemBuilder = ResponseItem.newBuilder(); + + responseItemBuilder.setIndex(failure.getIndex()); + if (response.getId().isEmpty()) { + responseItemBuilder.setId(ResponseItem.Id.newBuilder().setNullValue(NullValue.NULL_VALUE_NULL).build()); + } else { + responseItemBuilder.setId(ResponseItem.Id.newBuilder().setString(response.getId()).build()); + } + // TODO set the GRPC status instead of HTTP Status + responseItemBuilder.setStatus(failure.getStatus().getStatus()); + + ErrorCause errorCause = OpenSearchExceptionProtoUtils.generateThrowableProto(failure.getCause()); + + responseItemBuilder.setError(errorCause); + } + + ResponseItem responseItem; + switch (response.getOpType()) { + case CREATE: + responseItem = responseItemBuilder.build(); + itemBuilder.setCreate(responseItem); + break; + case INDEX: + responseItem = responseItemBuilder.build(); + itemBuilder.setIndex(responseItem); + break; + case UPDATE: + UpdateResponse updateResponse = response.getResponse(); + if (updateResponse != null) { + GetResult getResult = updateResponse.getGetResult(); + if (getResult != null) { + responseItemBuilder = GetResultProtoUtils.toProto(getResult, responseItemBuilder); + } + } + responseItem = responseItemBuilder.build(); + itemBuilder.setUpdate(responseItem); + break; + case DELETE: + responseItem = responseItemBuilder.build(); + itemBuilder.setDelete(responseItem); + break; + default: + throw new 
UnsupportedOperationException("Invalid op type: " + response.getOpType()); + } + + return itemBuilder.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkResponseProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkResponseProtoUtils.java new file mode 100644 index 0000000000000..c523c86f5ec3e --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkResponseProtoUtils.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.document.bulk; + +import org.opensearch.action.bulk.BulkItemResponse; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.protobufs.BulkResponseBody; + +import java.io.IOException; + +/** + * Utility class for converting BulkResponse objects to Protocol Buffers. + * This class handles the conversion of bulk operation responses to their + * Protocol Buffer representation. + */ +public class BulkResponseProtoUtils { + + private BulkResponseProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a BulkResponse to its Protocol Buffer representation. + * This method is equivalent to {@link BulkResponse#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param response The BulkResponse to convert + * @return A Protocol Buffer BulkResponse representation + * @throws IOException if there's an error during conversion + */ + public static org.opensearch.protobufs.BulkResponse toProto(BulkResponse response) throws IOException { + // System.out.println("=== grpc bulk response=" + response.toString()); + + org.opensearch.protobufs.BulkResponse.Builder bulkResponse = org.opensearch.protobufs.BulkResponse.newBuilder(); + + // Create the bulk response body + BulkResponseBody.Builder bulkResponseBody = BulkResponseBody.newBuilder(); + + // Set the time taken for the bulk operation (excluding ingest preprocessing) + bulkResponseBody.setTook(response.getTook().getMillis()); + + // Set ingest preprocessing time if available + if (response.getIngestTookInMillis() != BulkResponse.NO_INGEST_TOOK) { + bulkResponseBody.setIngestTook(response.getIngestTookInMillis()); + } + + // Set whether any operations failed + bulkResponseBody.setErrors(response.hasFailures()); + + // Add individual item responses for each operation in the bulk request + for (BulkItemResponse bulkItemResponse : response.getItems()) { + bulkResponseBody.addItems(BulkItemResponseProtoUtils.toProto(bulkItemResponse)); + } + + // Set the bulk response body and build the final response + bulkResponse.setBulkResponseBody(bulkResponseBody.build()); + return bulkResponse.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/package-info.java new file mode 100644 index 0000000000000..90f0097f72ef6 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/package-info.java @@ 
-0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Utility classes for handling document bulk responses in the gRPC transport plugin. + * This package contains utilities for converting bulk operation responses to Protocol Buffers. + */ +package org.opensearch.plugin.transport.grpc.proto.response.document.bulk; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocWriteResponseProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocWriteResponseProtoUtils.java new file mode 100644 index 0000000000000..58a5edeb0b197 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocWriteResponseProtoUtils.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.document.common; + +import org.opensearch.action.DocWriteResponse; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.protobufs.NullValue; +import org.opensearch.protobufs.ResponseItem; +import org.opensearch.protobufs.ShardInfo; + +import java.io.IOException; + +/** + * Utility class for converting DocWriteResponse objects to Protocol Buffers. + * This class handles the conversion of document write operation responses (index, create, update, delete) + * to their Protocol Buffer representation. + */ +public class DocWriteResponseProtoUtils { + + private DocWriteResponseProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a DocWriteResponse to its Protocol Buffer representation. 
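+ * <p>
+ * Illustrative usage, following the bulk item conversion in this plugin:
+ * <pre>{@code
+ * ResponseItem.Builder item = DocWriteResponseProtoUtils.toProto(docResponse);
+ * item.setStatus(docResponse.status().getStatus());
+ * }</pre>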
+ * This method is equivalent to the {@link DocWriteResponse#innerToXContent(XContentBuilder, ToXContent.Params)} + * + * @param response The DocWriteResponse to convert + * @return A ResponseItem.Builder with the DocWriteResponse data + * + */ + public static ResponseItem.Builder toProto(DocWriteResponse response) throws IOException { + ResponseItem.Builder responseItem = ResponseItem.newBuilder(); + + // Set the index name + responseItem.setIndex(response.getIndex()); + + // Handle document ID (can be null in some cases) + if (response.getId().isEmpty()) { + responseItem.setId(ResponseItem.Id.newBuilder().setNullValue(NullValue.NULL_VALUE_NULL).build()); + } else { + responseItem.setId(ResponseItem.Id.newBuilder().setString(response.getId()).build()); + } + + // Set document version + responseItem.setVersion(response.getVersion()); + + // Set operation result (CREATED, UPDATED, DELETED, NOT_FOUND, NOOP) + responseItem.setResult(response.getResult().getLowercase()); + + // Set forced refresh flag if applicable + if (response.forcedRefresh()) { + responseItem.setForcedRefresh(true); + } + // Handle shard information + ShardInfo shardInfo = ShardInfoProtoUtils.toProto(response.getShardInfo()); + responseItem.setShards(shardInfo); + + // Set sequence number and primary term if available + if (response.getSeqNo() >= 0) { + responseItem.setSeqNo(response.getSeqNo()); + responseItem.setPrimaryTerm(response.getPrimaryTerm()); + } + + return responseItem; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtils.java new file mode 100644 index 0000000000000..eedbd69a57057 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtils.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.document.common; + +import com.google.protobuf.Value; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.common.StructProtoUtils; + +import java.util.List; + +/** + * Utility class for converting DocumentField objects to Protocol Buffers. + * This class handles the conversion of document get operation results to their + * Protocol Buffer representation. + */ +public class DocumentFieldProtoUtils { + + private DocumentFieldProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a DocumentField values (list of objects) to its Protocol Buffer representation. + * This method is equivalent to the {@link DocumentField#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param fieldValues The list of DocumentField values to convert + * @return A Protobuf Value representation + */ + public static Value toProto(List fieldValues) { + return StructProtoUtils.toProto(fieldValues); + } + + /** + * Converts a DocumentField value (object) to its Protocol Buffer representation. 
+ * This method is equivalent to the {@link DocumentField#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param fieldValue The DocumentField value to convert + * @return A Protobuf Value representation + */ + public static Value toProto(Object fieldValue) { + return StructProtoUtils.toProto(fieldValue); + } + +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtils.java new file mode 100644 index 0000000000000..d8308216bc659 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtils.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.document.common; + +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.common.OpenSearchExceptionProtoUtils; +import org.opensearch.protobufs.ShardFailure; +import org.opensearch.protobufs.ShardInfo; + +import java.io.IOException; + +/** + * Utility class for converting ReplicationResponse.ShardInfo objects to Protocol Buffers. + */ +public class ShardInfoProtoUtils { + + private ShardInfoProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a ReplicationResponse.ShardInfo Java object to a protobuf ShardStatistics. + * Similar to {@link ReplicationResponse.ShardInfo#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param shardInfo The shard information to convert to protobuf format + * @return The protobuf representation of the shard information + * @throws IOException If there's an error during conversion + */ + public static ShardInfo toProto(ReplicationResponse.ShardInfo shardInfo) throws IOException { + ShardInfo.Builder shardInfoBuilder = ShardInfo.newBuilder(); + shardInfoBuilder.setTotal(shardInfo.getTotal()); + shardInfoBuilder.setSuccessful(shardInfo.getSuccessful()); + shardInfoBuilder.setFailed(shardInfo.getFailed()); + + // Add any shard failures + for (ReplicationResponse.ShardInfo.Failure failure : shardInfo.getFailures()) { + shardInfoBuilder.addFailures(toProto(failure)); + } + + return shardInfoBuilder.build(); + } + + /** + * Converts a ReplicationResponse.ShardInfo.Failure Java object to a protobuf ShardFailure. 
+ * Similar to {@link ReplicationResponse.ShardInfo.Failure#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param failure The shard failure to convert to protobuf format + * @return The protobuf representation of the shard failure + * @throws IOException If there's an error during conversion + */ + private static ShardFailure toProto(ReplicationResponse.ShardInfo.Failure failure) throws IOException { + ShardFailure.Builder shardFailure = ShardFailure.newBuilder(); + shardFailure.setIndex(failure.index()); + shardFailure.setShard(failure.shardId()); + shardFailure.setNode(failure.nodeId()); + shardFailure.setReason(OpenSearchExceptionProtoUtils.generateThrowableProto(failure.getCause())); + shardFailure.setStatus(failure.status().name()); + shardFailure.setPrimary(failure.primary()); + return shardFailure.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/package-info.java new file mode 100644 index 0000000000000..e477229c77a61 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Common utility classes for document response handling in the gRPC transport plugin. + * This package contains utilities for converting common document response elements to Protocol Buffers. + */ +package org.opensearch.plugin.transport.grpc.proto.response.document.common; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtils.java new file mode 100644 index 0000000000000..574179411d27c --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtils.java @@ -0,0 +1,106 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.document.get; + +import com.google.protobuf.ByteString; +import com.google.protobuf.Struct; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.get.GetResult; +import org.opensearch.index.mapper.IgnoredFieldMapper; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.plugin.transport.grpc.proto.response.document.common.DocumentFieldProtoUtils; +import org.opensearch.protobufs.InlineGetDictUserDefined; +import org.opensearch.protobufs.ResponseItem; + +/** + * Utility class for converting GetResult objects to Protocol Buffers. + * This class handles the conversion of document get operation results to their + * Protocol Buffer representation. 
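+ * <p>
+ * A hedged usage sketch, mirroring how the bulk update path embeds a get result into a response
+ * item (the builder shown is illustrative):
+ * <pre>{@code
+ * ResponseItem.Builder item = ResponseItem.newBuilder();
+ * item = GetResultProtoUtils.toProto(updateResponse.getGetResult(), item);
+ * }</pre>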
+ */ +public class GetResultProtoUtils { + + private GetResultProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a GetResult to its Protocol Buffer representation. + * This method is equivalent to the {@link GetResult#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param getResult The GetResult to convert + * @param responseItemBuilder + * @return A Protocol Buffer InlineGetDictUserDefined representation + */ + public static ResponseItem.Builder toProto(GetResult getResult, ResponseItem.Builder responseItemBuilder) { + InlineGetDictUserDefined.Builder inlineGetDictUserDefinedBuilder = InlineGetDictUserDefined.newBuilder(); + + responseItemBuilder.setIndex(getResult.getIndex()); + responseItemBuilder.setId(ResponseItem.Id.newBuilder().setString(getResult.getId()).build()); + + if (getResult.isExists()) { + // Set document version if available + if (getResult.getVersion() != -1) { + responseItemBuilder.setVersion(getResult.getVersion()); + } + inlineGetDictUserDefinedBuilder = toProtoEmbedded(getResult, inlineGetDictUserDefinedBuilder); + } else { + inlineGetDictUserDefinedBuilder.setFound(false); + } + + responseItemBuilder.setGet(inlineGetDictUserDefinedBuilder); + return responseItemBuilder; + } + + /** + * Converts a GetResult to its Protocol Buffer representation for embedding in another message. + * This method is equivalent to the {@link GetResult#toXContentEmbedded(XContentBuilder, ToXContent.Params)} + * + * @param getResult The GetResult to convert + * @param builder The builder to add the GetResult data to + * @return The updated builder with the GetResult data + */ + public static InlineGetDictUserDefined.Builder toProtoEmbedded(GetResult getResult, InlineGetDictUserDefined.Builder builder) { + // Set sequence number and primary term if available + if (getResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { + builder.setSeqNo(getResult.getSeqNo()); + builder.setPrimaryTerm(getResult.getPrimaryTerm()); + } + + // TODO test output once GetDocument GRPC endpoint is implemented + Struct.Builder metadataFieldsBuilder = Struct.newBuilder(); + for (DocumentField field : getResult.getMetadataFields().values()) { + if (field.getName().equals(IgnoredFieldMapper.NAME)) { + metadataFieldsBuilder.putFields(field.getName(), DocumentFieldProtoUtils.toProto(field.getValues())); + } else { + metadataFieldsBuilder.putFields(field.getName(), DocumentFieldProtoUtils.toProto(field.getValue())); + } + } + builder.setMetadataFields(metadataFieldsBuilder.build()); + + // Set existence status + builder.setFound(getResult.isExists()); + + // Set source if available + if (getResult.source() != null) { + builder.setSource(ByteString.copyFrom(getResult.source())); + } + + // TODO test output once GetDocument GRPC endpoint is implemented + Struct.Builder documentFieldsBuilder = Struct.newBuilder(); + if (!getResult.getDocumentFields().isEmpty()) { + for (DocumentField field : getResult.getDocumentFields().values()) { + documentFieldsBuilder.putFields(field.getName(), DocumentFieldProtoUtils.toProto(field.getValues())); + } + } + builder.setFields(documentFieldsBuilder.build()); + + return builder; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/package-info.java new file mode 100644 index 0000000000000..d5caca6df5b34 --- /dev/null +++ 
b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Utility classes for handling document get responses in the gRPC transport plugin. + * This package contains utilities for converting document get responses to Protocol Buffers. + */ +package org.opensearch.plugin.transport.grpc.proto.response.document.get; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/CircuitBreakingExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/CircuitBreakingExceptionProtoUtils.java new file mode 100644 index 0000000000000..a101aff2bae64 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/CircuitBreakingExceptionProtoUtils.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.ObjectMap; + +import java.util.HashMap; +import java.util.Map; + +/** + * Utility class for converting CircuitBreakingException objects to Protocol Buffers. + * This class specifically handles the conversion of CircuitBreakingException instances + * to their Protocol Buffer representation, preserving metadata about memory limits + * and circuit breaker durability. + */ +public class CircuitBreakingExceptionProtoUtils { + + private CircuitBreakingExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a CircuitBreakingException to a Protocol Buffer Struct. 
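+ * <p>
+ * The returned map is merged into the {@code ErrorCause} metadata by
+ * {@code OpenSearchExceptionProtoUtils}. Sketch of the expected shape (values illustrative):
+ * <pre>{@code
+ * Map<String, ObjectMap.Value> metadata = metadataToProto(exception);
+ * // contains "bytes_wanted", "bytes_limit" and "durability" entries
+ * }</pre>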
+ * Similar to {@link CircuitBreakingException#metadataToXContent(XContentBuilder, ToXContent.Params)} * + * + * @param exception The CircuitBreakingException to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static Map metadataToProto(CircuitBreakingException exception) { + Map map = new HashMap<>(); + map.put("bytes_wanted", ObjectMapProtoUtils.toProto(exception.getBytesWanted())); + map.put("bytes_limit", ObjectMapProtoUtils.toProto(exception.getByteLimit())); + map.put("durability", ObjectMapProtoUtils.toProto(exception.getDurability())); + return map; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/FailedNodeExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/FailedNodeExceptionProtoUtils.java new file mode 100644 index 0000000000000..102a6963746c1 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/FailedNodeExceptionProtoUtils.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.ObjectMap; + +import java.util.HashMap; +import java.util.Map; + +/** + * Utility class for converting FailedNodeException objects to Protocol Buffers. + * This class specifically handles the conversion of FailedNodeException instances + * to their Protocol Buffer representation, preserving metadata about node failures + * in a distributed OpenSearch cluster. + */ +public class FailedNodeExceptionProtoUtils { + + private FailedNodeExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a FailedNodeException to a Protocol Buffer Struct. + * Similar to {@link FailedNodeException#metadataToXContent(XContentBuilder, ToXContent.Params)} * + * + * @param exception The FailedNodeException to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static Map metadataToProto(FailedNodeException exception) { + Map map = new HashMap<>(); + map.put("node_id", ObjectMapProtoUtils.toProto(exception.nodeId())); + return map; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ParsingExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ParsingExceptionProtoUtils.java new file mode 100644 index 0000000000000..8b1025b97ef64 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ParsingExceptionProtoUtils.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.core.common.ParsingException; +import org.opensearch.plugin.transport.grpc.proto.response.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.ObjectMap; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.core.common.ParsingException.UNKNOWN_POSITION; + +/** + * Utility class for converting ParsingException objects to Protocol Buffers. + * This class specifically handles the conversion of ParsingException instances + * to their Protocol Buffer representation, preserving metadata about parsing errors + * including line and column position information when available. + */ +public class ParsingExceptionProtoUtils { + + private ParsingExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a ParsingException to a Protocol Buffer Struct. + * This method extracts line and column position information from the exception + * when available (when not equal to UNKNOWN_POSITION), which helps identify + * the exact location of parsing errors in the input content. + * + * @param exception The ParsingException to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static Map metadataToProto(ParsingException exception) { + Map map = new HashMap<>(); + if (exception.getLineNumber() != UNKNOWN_POSITION) { + map.put("line", ObjectMapProtoUtils.toProto(exception.getLineNumber())); + map.put("col", ObjectMapProtoUtils.toProto(exception.getColumnNumber())); + } + return map; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ResponseLimitBreachedExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ResponseLimitBreachedExceptionProtoUtils.java new file mode 100644 index 0000000000000..a72aab6fdf9c6 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ResponseLimitBreachedExceptionProtoUtils.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.common.breaker.ResponseLimitBreachedException; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.ObjectMap; + +import java.util.HashMap; +import java.util.Map; + +/** + * Utility class for converting ResponseLimitBreachedException objects to Protocol Buffers. + * This class specifically handles the conversion of ResponseLimitBreachedException instances + * to their Protocol Buffer representation, preserving metadata about response size limits + * that were exceeded during query execution. + */ +public class ResponseLimitBreachedExceptionProtoUtils { + + private ResponseLimitBreachedExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a ResponseLimitBreachedException to a Protocol Buffer Struct. 
+ * + * Similar to {@link ResponseLimitBreachedException#metadataToXContent(XContentBuilder, ToXContent.Params)} + * + * @param exception The ResponseLimitBreachedException to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static Map metadataToProto(ResponseLimitBreachedException exception) { + Map map = new HashMap<>(); + map.put("response_limit", ObjectMapProtoUtils.toProto(exception.getResponseLimit())); + map.put("limit_entity", ObjectMapProtoUtils.toProto(exception.getLimitEntity())); + return map; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ScriptExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ScriptExceptionProtoUtils.java new file mode 100644 index 0000000000000..59530b97fe363 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ScriptExceptionProtoUtils.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.script.ScriptException; + +import java.util.HashMap; +import java.util.Map; + +/** + * Utility class for converting ScriptException objects to Protocol Buffers. + * This class specifically handles the conversion of ScriptException instances + * to their Protocol Buffer representation, preserving metadata about script errors + * including script stack traces, language information, and position details. + */ +public class ScriptExceptionProtoUtils { + + private ScriptExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a ScriptException to a Protocol Buffer Struct. + * Similar to {@link ScriptException#metadataToXContent(XContentBuilder, ToXContent.Params)} + * + * @param exception The ScriptException to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static Map metadataToProto(ScriptException exception) { + Map map = new HashMap<>(); + + map.put("script_stack", ObjectMapProtoUtils.toProto(exception.getScriptStack())); + map.put("script", ObjectMapProtoUtils.toProto(exception.getScript())); + map.put("lang", ObjectMapProtoUtils.toProto(exception.getLang())); + if (exception.getPos() != null) { + map = toProto(map, exception.getPos()); + } + return map; + } + + /** + * Converts a ScriptException.Position to Protocol Buffer format and adds it to the given builder. 
+ * Similar to {@link ScriptException.Position#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param map The map to add position information to + * @param pos The ScriptException.Position containing position details + * @return The updated map with position information added + */ + public static Map toProto(Map map, ScriptException.Position pos) { + // Create a map for position information + ObjectMap.Builder positionMapBuilder = ObjectMap.newBuilder(); + + // Add position fields + positionMapBuilder.putFields("offset", ObjectMapProtoUtils.toProto(pos.offset)); + positionMapBuilder.putFields("start", ObjectMapProtoUtils.toProto(pos.start)); + positionMapBuilder.putFields("end", ObjectMapProtoUtils.toProto(pos.end)); + + // Create a value with the position map + ObjectMap.Value positionValue = ObjectMap.Value.newBuilder().setObjectMap(positionMapBuilder.build()).build(); + + // Add the position value to the main map + map.put("position", positionValue); + + return map; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchParseExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchParseExceptionProtoUtils.java new file mode 100644 index 0000000000000..c8ab8eb14e7d9 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchParseExceptionProtoUtils.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.search.SearchParseException; + +import java.util.HashMap; +import java.util.Map; + +/** + * Utility class for converting SearchParseException objects to Protocol Buffers. + * This class specifically handles the conversion of SearchParseException instances + * to their Protocol Buffer representation, preserving metadata about search query + * parsing errors including line and column position information. + */ +public class SearchParseExceptionProtoUtils { + + private SearchParseExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a SearchParseException to a Protocol Buffer Struct. 
+ * Similar to {@link SearchParseException#metadataToXContent(XContentBuilder, ToXContent.Params)} + * + * @param exception The SearchParseException to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static Map metadataToProto(SearchParseException exception) { + Map map = new HashMap<>(); + map.put("line", ObjectMapProtoUtils.toProto(exception.getLineNumber())); + map.put("col", ObjectMapProtoUtils.toProto(exception.getColumnNumber())); + return map; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchPhaseExecutionExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchPhaseExecutionExceptionProtoUtils.java new file mode 100644 index 0000000000000..7ed36497cabc8 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchPhaseExecutionExceptionProtoUtils.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.ObjectMap; + +import java.util.HashMap; +import java.util.Map; + +/** + * Utility class for converting SearchPhaseExecutionException objects to Protocol Buffers. + * This class specifically handles the conversion of SearchPhaseExecutionException instances + * to their Protocol Buffer representation, preserving metadata about search phase failures + * and associated shard operation failures. + */ +public class SearchPhaseExecutionExceptionProtoUtils { + + private SearchPhaseExecutionExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a SearchPhaseExecutionException to a Protocol Buffer Struct. 
+ * Similar to {@link SearchPhaseExecutionException#metadataToXContent(XContentBuilder, ToXContent.Params)} * + * + * @param exception The SearchPhaseExecutionException to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static Map metadataToProto(SearchPhaseExecutionException exception) { + Map map = new HashMap<>(); + map.put("phase", ObjectMapProtoUtils.toProto(exception.getPhaseName())); + map.put("grouped", ObjectMapProtoUtils.toProto(true)); + + ObjectMap.ListValue.Builder listBuilder = ObjectMap.ListValue.newBuilder(); + ShardOperationFailedException[] failures = ExceptionsHelper.groupBy(exception.shardFailures()); + for (ShardOperationFailedException failure : failures) { + listBuilder.addValue(ShardOperationFailedExceptionProtoUtils.toProto(failure)); + } + map.put("failed_shards", ObjectMap.Value.newBuilder().setListValue(listBuilder.build()).build()); + + return map; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ShardOperationFailedExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ShardOperationFailedExceptionProtoUtils.java new file mode 100644 index 0000000000000..929eb3b19d646 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ShardOperationFailedExceptionProtoUtils.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.protobufs.ObjectMap; + +/** + * Utility class for converting ShardOperationFailedException objects to Protocol Buffers. + * This class specifically handles the conversion of ShardOperationFailedException instances + * to their Protocol Buffer representation, which represent failures that occur during + * operations on specific shards in an OpenSearch cluster. + */ +public class ShardOperationFailedExceptionProtoUtils { + + private ShardOperationFailedExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a ShardOperationFailedException to a Protocol Buffer Value. + * This method is similar to {@link ShardOperationFailedException#toXContent(XContentBuilder, ToXContent.Params)} + * TODO why is ShardOperationFailedException#toXContent() empty? 
+ * + * @param exception The ShardOperationFailedException to convert + * @return A Protocol Buffer Value representing the exception (currently empty) + */ + public static ObjectMap.Value toProto(ShardOperationFailedException exception) { + return ObjectMap.Value.newBuilder().build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/TooManyBucketsExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/TooManyBucketsExceptionProtoUtils.java new file mode 100644 index 0000000000000..a89f45ea730f0 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/TooManyBucketsExceptionProtoUtils.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.search.aggregations.MultiBucketConsumerService; + +import java.util.HashMap; +import java.util.Map; + +/** + * Utility class for converting TooManyBucketsException objects to Protocol Buffers. + * This class specifically handles the conversion of MultiBucketConsumerService.TooManyBucketsException + * instances to their Protocol Buffer representation, preserving metadata about aggregation + * bucket limits that were exceeded during query execution. + */ +public class TooManyBucketsExceptionProtoUtils { + + private TooManyBucketsExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a TooManyBucketsException to a Protocol Buffer Struct. + * Similar to {@link MultiBucketConsumerService.TooManyBucketsException#metadataToXContent(XContentBuilder, ToXContent.Params)} * + * + * @param exception The TooManyBucketsException to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static Map metadataToProto(MultiBucketConsumerService.TooManyBucketsException exception) { + Map map = new HashMap<>(); + map.put("max_buckets", ObjectMapProtoUtils.toProto(exception.getMaxBuckets())); + return map; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/package-info.java new file mode 100644 index 0000000000000..912d5de1052bf --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/package-info.java @@ -0,0 +1,17 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains utility classes for converting various OpenSearch exceptions to Protocol Buffer representations. 
+ * Each utility class is specialized for a specific exception type and handles the conversion of that exception's + * metadata to Protocol Buffers, preserving the relevant information about the exception. + * <p>
+ * These utilities are used by the gRPC transport plugin to convert OpenSearch exceptions to a format that can be + * transmitted over gRPC and properly interpreted by clients. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/DocumentServiceImpl.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/DocumentServiceImpl.java new file mode 100644 index 0000000000000..865a6b601e702 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/DocumentServiceImpl.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.services; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.plugin.transport.grpc.listeners.BulkRequestActionListener; +import org.opensearch.plugin.transport.grpc.proto.request.document.bulk.BulkRequestProtoUtils; +import org.opensearch.protobufs.services.DocumentServiceGrpc; +import org.opensearch.transport.client.Client; + +import io.grpc.stub.StreamObserver; + +/** + * Implementation of the gRPC Document Service. + */ +public class DocumentServiceImpl extends DocumentServiceGrpc.DocumentServiceImplBase { + private static final Logger logger = LogManager.getLogger(DocumentServiceImpl.class); + private final Client client; + + /** + * Creates a new DocumentServiceImpl. + * + * @param client Client for executing actions on the local node + */ + public DocumentServiceImpl(Client client) { + this.client = client; + } + + /** + * Processes a bulk request. + * + * @param request The bulk request to process + * @param responseObserver The observer to send the response back to the client + */ + @Override + public void bulk(org.opensearch.protobufs.BulkRequest request, StreamObserver responseObserver) { + try { + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); + BulkRequestActionListener listener = new BulkRequestActionListener(responseObserver); + client.bulk(bulkRequest, listener); + } catch (RuntimeException e) { + logger.error("DocumentServiceImpl failed to process bulk request, request=" + request + ", error=" + e.getMessage()); + responseObserver.onError(e); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/package-info.java new file mode 100644 index 0000000000000..d2c586f629635 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * gRPC service implementations for the OpenSearch transport plugin. + * This package contains the service implementations that handle gRPC requests and convert them to OpenSearch actions. 
+ */ +package org.opensearch.plugin.transport.grpc.services; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java deleted file mode 100644 index 7f02983010f98..0000000000000 --- a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ -package org.opensearch.transport.grpc; - -import org.opensearch.common.network.NetworkService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.core.indices.breaker.CircuitBreakerService; -import org.opensearch.plugins.NetworkPlugin; -import org.opensearch.plugins.Plugin; -import org.opensearch.telemetry.tracing.Tracer; -import org.opensearch.threadpool.ThreadPool; - -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.function.Supplier; - -import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY; -import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST; -import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_HOST; -import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORT; -import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST; -import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT; -import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT; - -/** - * Main class for the gRPC plugin. - */ -public final class GrpcPlugin extends Plugin implements NetworkPlugin { - - /** - * Creates a new GrpcPlugin instance. - */ - public GrpcPlugin() {} - - @Override - public Map> getAuxTransports( - Settings settings, - ThreadPool threadPool, - CircuitBreakerService circuitBreakerService, - NetworkService networkService, - ClusterSettings clusterSettings, - Tracer tracer - ) { - return Collections.singletonMap( - GRPC_TRANSPORT_SETTING_KEY, - () -> new Netty4GrpcServerTransport(settings, Collections.emptyList(), networkService) - ); - } - - @Override - public List> getSettings() { - return List.of( - SETTING_GRPC_PORT, - SETTING_GRPC_HOST, - SETTING_GRPC_PUBLISH_HOST, - SETTING_GRPC_BIND_HOST, - SETTING_GRPC_WORKER_COUNT, - SETTING_GRPC_PUBLISH_PORT - ); - } -} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/GrpcPluginTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/GrpcPluginTests.java new file mode 100644 index 0000000000000..974602bce3278 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/GrpcPluginTests.java @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.client.Client; +import org.junit.Before; + +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_HOST; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORT; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT; +import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT; + +public class GrpcPluginTests extends OpenSearchTestCase { + + private GrpcPlugin plugin; + + @Mock + private ThreadPool threadPool; + + @Mock + private CircuitBreakerService circuitBreakerService; + + @Mock + private Client client; + + private NetworkService networkService; + + private ClusterSettings clusterSettings; + + @Mock + private Tracer tracer; + + @Before + public void setup() { + MockitoAnnotations.openMocks(this); + // Use real instances instead of mocks for final classes + networkService = new NetworkService(List.of()); + + // Create a real ClusterSettings instance with the plugin's settings + plugin = new GrpcPlugin(); + + // Set the client in the plugin + plugin.createComponents( + client, + null, // ClusterService + null, // ThreadPool + null, // ResourceWatcherService + null, // ScriptService + null, // NamedXContentRegistry + null, // Environment + null, // NodeEnvironment + null, // NamedWriteableRegistry + null, // IndexNameExpressionResolver + null // Supplier + ); + + clusterSettings = new ClusterSettings(Settings.EMPTY, plugin.getSettings().stream().collect(java.util.stream.Collectors.toSet())); + } + + public void testGetSettings() { + List> settings = plugin.getSettings(); + + // Verify that all expected settings are returned + assertTrue("SETTING_GRPC_PORT should be included", settings.contains(SETTING_GRPC_PORT)); + assertTrue("SETTING_GRPC_HOST should be included", settings.contains(SETTING_GRPC_HOST)); + assertTrue("SETTING_GRPC_PUBLISH_HOST should be included", settings.contains(SETTING_GRPC_PUBLISH_HOST)); + assertTrue("SETTING_GRPC_BIND_HOST should be included", settings.contains(SETTING_GRPC_BIND_HOST)); + assertTrue("SETTING_GRPC_WORKER_COUNT should be included", settings.contains(SETTING_GRPC_WORKER_COUNT)); + assertTrue("SETTING_GRPC_PUBLISH_PORT should be included", settings.contains(SETTING_GRPC_PUBLISH_PORT)); + + // Verify the number of settings + assertEquals("Should return 6 settings", 6, settings.size()); + } + + public void testGetAuxTransports() { + Settings settings = 
Settings.builder().put(SETTING_GRPC_PORT.getKey(), "9200-9300").build(); + + Map> transports = plugin.getAuxTransports( + settings, + threadPool, + circuitBreakerService, + networkService, + clusterSettings, + tracer + ); + + // Verify that the transport map contains the expected key + assertTrue("Should contain GRPC_TRANSPORT_SETTING_KEY", transports.containsKey(GRPC_TRANSPORT_SETTING_KEY)); + + // Verify that the supplier returns a Netty4GrpcServerTransport instance + NetworkPlugin.AuxTransport transport = transports.get(GRPC_TRANSPORT_SETTING_KEY).get(); + assertTrue("Should return a Netty4GrpcServerTransport instance", transport instanceof Netty4GrpcServerTransport); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportTests.java new file mode 100644 index 0000000000000..dcade2e8bf880 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportTests.java @@ -0,0 +1,172 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc; + +import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.MatcherAssert; +import org.junit.Before; + +import java.util.List; + +import io.grpc.BindableService; + +import static org.hamcrest.Matchers.emptyArray; +import static org.hamcrest.Matchers.not; + +public class Netty4GrpcServerTransportTests extends OpenSearchTestCase { + + private NetworkService networkService; + private List services; + + @Before + public void setup() { + networkService = new NetworkService(List.of()); + services = List.of(); + } + + public void testBasicStartAndStop() { + try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(createSettings(), services, networkService)) { + transport.start(); + + MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); + assertNotNull(transport.boundAddress().publishAddress().address()); + + transport.stop(); + } + } + + public void testWithCustomPort() { + // Create settings with a specific port + Settings settings = Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), "9000-9010").build(); + + try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { + transport.start(); + + MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); + TransportAddress publishAddress = transport.boundAddress().publishAddress(); + assertNotNull(publishAddress.address()); + assertTrue("Port should be in the specified range", publishAddress.getPort() >= 9000 && publishAddress.getPort() <= 9010); + + transport.stop(); + } + } + + public void testWithCustomPublishPort() { + // Create settings with a specific publish port + Settings settings = Settings.builder() + .put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), OpenSearchTestCase.getPortRange()) + .put(Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT.getKey(), 9000) + .build(); + + try 
(Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { + transport.start(); + + MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); + TransportAddress publishAddress = transport.boundAddress().publishAddress(); + assertNotNull(publishAddress.address()); + assertEquals("Publish port should match the specified value", 9000, publishAddress.getPort()); + + transport.stop(); + } + } + + public void testWithCustomHost() { + // Create settings with a specific host + Settings settings = Settings.builder() + .put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), OpenSearchTestCase.getPortRange()) + .put(Netty4GrpcServerTransport.SETTING_GRPC_HOST.getKey(), "127.0.0.1") + .build(); + + try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { + transport.start(); + + MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); + TransportAddress publishAddress = transport.boundAddress().publishAddress(); + assertNotNull(publishAddress.address()); + assertEquals( + "Host should match the specified value", + "127.0.0.1", + InetAddresses.toAddrString(publishAddress.address().getAddress()) + ); + + transport.stop(); + } + } + + public void testWithCustomBindHost() { + // Create settings with a specific bind host + Settings settings = Settings.builder() + .put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), OpenSearchTestCase.getPortRange()) + .put(Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST.getKey(), "127.0.0.1") + .build(); + + try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { + transport.start(); + + MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); + TransportAddress boundAddress = transport.boundAddress().boundAddresses()[0]; + assertNotNull(boundAddress.address()); + assertEquals( + "Bind host should match the specified value", + "127.0.0.1", + InetAddresses.toAddrString(boundAddress.address().getAddress()) + ); + + transport.stop(); + } + } + + public void testWithCustomPublishHost() { + // Create settings with a specific publish host + Settings settings = Settings.builder() + .put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), OpenSearchTestCase.getPortRange()) + .put(Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST.getKey(), "127.0.0.1") + .build(); + + try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { + transport.start(); + + MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); + TransportAddress publishAddress = transport.boundAddress().publishAddress(); + assertNotNull(publishAddress.address()); + assertEquals( + "Publish host should match the specified value", + "127.0.0.1", + InetAddresses.toAddrString(publishAddress.address().getAddress()) + ); + + transport.stop(); + } + } + + public void testWithCustomWorkerCount() { + // Create settings with a specific worker count + Settings settings = Settings.builder() + .put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), OpenSearchTestCase.getPortRange()) + .put(Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT.getKey(), 4) + .build(); + + try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { + transport.start(); + + MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), 
not(emptyArray())); + assertNotNull(transport.boundAddress().publishAddress().address()); + + transport.stop(); + } + } + + private static Settings createSettings() { + return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), OpenSearchTestCase.getPortRange()).build(); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListenerTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListenerTests.java new file mode 100644 index 0000000000000..9a6fbd21d7224 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListenerTests.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.listeners; + +import org.opensearch.action.DocWriteRequest; +import org.opensearch.action.bulk.BulkItemResponse; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +import io.grpc.stub.StreamObserver; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; + +public class BulkRequestActionListenerTests extends OpenSearchTestCase { + + @Mock + private StreamObserver responseObserver; + + private BulkRequestActionListener listener; + + @Override + public void setUp() throws Exception { + super.setUp(); + MockitoAnnotations.openMocks(this); + listener = new BulkRequestActionListener(responseObserver); + } + + public void testOnResponseWithSuccessfulResponse() { + // Create a successful BulkResponse + BulkItemResponse[] responses = new BulkItemResponse[1]; + Index index = new Index("test-index", "_na_"); + ShardId shardId = new ShardId(index, 1); + IndexResponse indexResponse = new IndexResponse(shardId, "test-id", 1, 1, 1, true); + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(); + indexResponse.setShardInfo(shardInfo); + responses[0] = new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, indexResponse); + + BulkResponse bulkResponse = new BulkResponse(responses, 100); + + // Call onResponse + listener.onResponse(bulkResponse); + + // Verify that onNext and onCompleted were called + verify(responseObserver).onNext(any(org.opensearch.protobufs.BulkResponse.class)); + verify(responseObserver).onCompleted(); + } + + public void testOnResponseWithException() { + // Create a mock BulkResponse that will cause an exception when converted to proto + BulkResponse bulkResponse = null; + + // Call onResponse + listener.onResponse(bulkResponse); + + // Verify that onError was called + verify(responseObserver).onError(any(Throwable.class)); + } + + public void testOnFailure() { + // Create an exception + Exception exception = new IOException("Test exception"); + + // Call onFailure + listener.onFailure(exception); + + // Verify that onError was called + verify(responseObserver).onError(any(Throwable.class)); + } +} diff --git 
a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtilsTests.java new file mode 100644 index 0000000000000..1fc4d26101eab --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtilsTests.java @@ -0,0 +1,180 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.common; + +import org.opensearch.core.common.Strings; +import org.opensearch.protobufs.BulkRequest; +import org.opensearch.protobufs.SourceConfig; +import org.opensearch.protobufs.SourceConfigParam; +import org.opensearch.protobufs.SourceFilter; +import org.opensearch.protobufs.StringArray; +import org.opensearch.search.fetch.subphase.FetchSourceContext; +import org.opensearch.test.OpenSearchTestCase; + +public class FetchSourceContextProtoUtilsTests extends OpenSearchTestCase { + + public void testParseFromProtoRequestWithBoolValue() { + // Create a BulkRequest with source as boolean + BulkRequest request = BulkRequest.newBuilder().setSource(SourceConfigParam.newBuilder().setBoolValue(true).build()).build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should be empty", Strings.EMPTY_ARRAY, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testParseFromProtoRequestWithStringArray() { + // Create a BulkRequest with source as string array + BulkRequest request = BulkRequest.newBuilder() + .setSource( + SourceConfigParam.newBuilder() + .setStringArray(StringArray.newBuilder().addStringArray("field1").addStringArray("field2").build()) + .build() + ) + .build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "field1", "field2" }, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testParseFromProtoRequestWithSourceIncludes() { + // Create a BulkRequest with source includes + BulkRequest request = BulkRequest.newBuilder().addSourceIncludes("field1").addSourceIncludes("field2").build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "field1", "field2" }, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testParseFromProtoRequestWithSourceExcludes() { + // Create a BulkRequest with source excludes + BulkRequest 
request = BulkRequest.newBuilder().addSourceExcludes("field1").addSourceExcludes("field2").build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should be empty", Strings.EMPTY_ARRAY, context.includes()); + assertArrayEquals("excludes should match", new String[] { "field1", "field2" }, context.excludes()); + } + + public void testParseFromProtoRequestWithBothIncludesAndExcludes() { + // Create a BulkRequest with both source includes and excludes + BulkRequest request = BulkRequest.newBuilder() + .addSourceIncludes("include1") + .addSourceIncludes("include2") + .addSourceExcludes("exclude1") + .addSourceExcludes("exclude2") + .build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "include1", "include2" }, context.includes()); + assertArrayEquals("excludes should match", new String[] { "exclude1", "exclude2" }, context.excludes()); + } + + public void testParseFromProtoRequestWithNoSourceParams() { + // Create a BulkRequest with no source parameters + BulkRequest request = BulkRequest.newBuilder().build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + // The implementation returns a default FetchSourceContext with fetchSource=true + // and empty includes/excludes arrays when no source parameters are provided + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should be empty", Strings.EMPTY_ARRAY, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testFromProtoWithFetch() { + // Create a SourceConfig with fetch=true + SourceConfig sourceConfig = SourceConfig.newBuilder().setFetch(true).build(); + + // Convert to FetchSourceContext + FetchSourceContext context = FetchSourceContextProtoUtils.fromProto(sourceConfig); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should be empty", Strings.EMPTY_ARRAY, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testFromProtoWithIncludes() { + // Create a SourceConfig with includes + SourceConfig sourceConfig = SourceConfig.newBuilder() + .setIncludes(StringArray.newBuilder().addStringArray("field1").addStringArray("field2").build()) + .build(); + + // Convert to FetchSourceContext + FetchSourceContext context = FetchSourceContextProtoUtils.fromProto(sourceConfig); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "field1", "field2" }, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testFromProtoWithFilterIncludes() { 
+ // Create a SourceConfig with filter includes + SourceConfig sourceConfig = SourceConfig.newBuilder() + .setFilter(SourceFilter.newBuilder().addIncludes("field1").addIncludes("field2").build()) + .build(); + + // Convert to FetchSourceContext + FetchSourceContext context = FetchSourceContextProtoUtils.fromProto(sourceConfig); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "field1", "field2" }, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testFromProtoWithFilterExcludes() { + // Create a SourceConfig with filter excludes + SourceConfig sourceConfig = SourceConfig.newBuilder() + .setFilter(SourceFilter.newBuilder().addExcludes("field1").addExcludes("field2").build()) + .build(); + + // Convert to FetchSourceContext + FetchSourceContext context = FetchSourceContextProtoUtils.fromProto(sourceConfig); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should be empty", Strings.EMPTY_ARRAY, context.includes()); + assertArrayEquals("excludes should match", new String[] { "field1", "field2" }, context.excludes()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtilsTests.java new file mode 100644 index 0000000000000..23649647996bc --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtilsTests.java @@ -0,0 +1,187 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.common; + +import org.opensearch.protobufs.NullValue; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.protobufs.ObjectMap.ListValue; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; +import java.util.Map; + +public class ObjectMapProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithEmptyMap() { + // Create an empty ObjectMap + ObjectMap objectMap = ObjectMap.newBuilder().build(); + + // Convert to Java Map + Map map = ObjectMapProtoUtils.fromProto(objectMap); + + // Verify the result + assertNotNull("Map should not be null", map); + assertTrue("Map should be empty", map.isEmpty()); + } + + public void testFromProtoWithStringValue() { + // Create an ObjectMap with a string value + ObjectMap objectMap = ObjectMap.newBuilder().putFields("key", ObjectMap.Value.newBuilder().setString("value").build()).build(); + + // Convert to Java Map + Map map = ObjectMapProtoUtils.fromProto(objectMap); + + // Verify the result + assertNotNull("Map should not be null", map); + assertEquals("Map should have 1 entry", 1, map.size()); + assertTrue("Map should contain the key", map.containsKey("key")); + assertEquals("Value should be a string", "value", map.get("key")); + } + + public void testFromProtoWithBooleanValue() { + // Create an ObjectMap with a boolean value + ObjectMap objectMap = ObjectMap.newBuilder().putFields("key", ObjectMap.Value.newBuilder().setBool(true).build()).build(); + + // Convert to Java Map + Map map = ObjectMapProtoUtils.fromProto(objectMap); + + // Verify the result + assertNotNull("Map should not be null", map); + assertEquals("Map should have 1 entry", 1, map.size()); + assertTrue("Map should contain the key", map.containsKey("key")); + assertEquals("Value should be a boolean", true, map.get("key")); + } + + public void testFromProtoWithDoubleValue() { + // Create an ObjectMap with a double value + double value = 123.456; + ObjectMap objectMap = ObjectMap.newBuilder().putFields("key", ObjectMap.Value.newBuilder().setDouble(value).build()).build(); + + // Convert to Java Map + Map map = ObjectMapProtoUtils.fromProto(objectMap); + + // Verify the result + assertNotNull("Map should not be null", map); + assertEquals("Map should have 1 entry", 1, map.size()); + assertTrue("Map should contain the key", map.containsKey("key")); + assertEquals("Value should be a double", value, map.get("key")); + } + + public void testFromProtoWithFloatValue() { + // Create an ObjectMap with a float value + float value = 123.456f; + ObjectMap objectMap = ObjectMap.newBuilder().putFields("key", ObjectMap.Value.newBuilder().setFloat(value).build()).build(); + + // Convert to Java Map + Map map = ObjectMapProtoUtils.fromProto(objectMap); + + // Verify the result + assertNotNull("Map should not be null", map); + assertEquals("Map should have 1 entry", 1, map.size()); + assertTrue("Map should contain the key", map.containsKey("key")); + assertEquals("Value should be a float", value, map.get("key")); + } + + public void testFromProtoWithInt32Value() { + // Create an ObjectMap with an int32 value + int value = 123; + ObjectMap objectMap = ObjectMap.newBuilder().putFields("key", ObjectMap.Value.newBuilder().setInt32(value).build()).build(); + + // Convert to Java Map + Map map = ObjectMapProtoUtils.fromProto(objectMap); + + // Verify the result + assertNotNull("Map should not be null", map); + assertEquals("Map should have 1 entry", 1, map.size()); + assertTrue("Map should 
contain the key", map.containsKey("key")); + assertEquals("Value should be an int32", value, map.get("key")); + } + + public void testFromProtoWithInt64Value() { + // Create an ObjectMap with an int64 value + long value = 123456789L; + ObjectMap objectMap = ObjectMap.newBuilder().putFields("key", ObjectMap.Value.newBuilder().setInt64(value).build()).build(); + + // Convert to Java Map + Map map = ObjectMapProtoUtils.fromProto(objectMap); + + // Verify the result + assertNotNull("Map should not be null", map); + assertEquals("Map should have 1 entry", 1, map.size()); + assertTrue("Map should contain the key", map.containsKey("key")); + assertEquals("Value should be an int64", value, map.get("key")); + } + + public void testFromProtoWithListValue() { + // Create an ObjectMap with a list value + ListValue listValue = ListValue.newBuilder() + .addValue(ObjectMap.Value.newBuilder().setString("value1").build()) + .addValue(ObjectMap.Value.newBuilder().setInt32(123).build()) + .addValue(ObjectMap.Value.newBuilder().setBool(true).build()) + .build(); + + ObjectMap objectMap = ObjectMap.newBuilder().putFields("key", ObjectMap.Value.newBuilder().setListValue(listValue).build()).build(); + + // Convert to Java Map + Map map = ObjectMapProtoUtils.fromProto(objectMap); + + // Verify the result + assertNotNull("Map should not be null", map); + assertEquals("Map should have 1 entry", 1, map.size()); + assertTrue("Map should contain the key", map.containsKey("key")); + assertTrue("Value should be a List", map.get("key") instanceof List); + + List list = (List) map.get("key"); + assertEquals("List should have 3 elements", 3, list.size()); + assertEquals("First element should be a string", "value1", list.get(0)); + assertEquals("Second element should be an int", 123, list.get(1)); + assertEquals("Third element should be a boolean", true, list.get(2)); + } + + public void testFromProtoWithNestedObjectMap() { + // Create a nested ObjectMap + ObjectMap nestedMap = ObjectMap.newBuilder() + .putFields("nestedKey", ObjectMap.Value.newBuilder().setString("nestedValue").build()) + .build(); + + ObjectMap objectMap = ObjectMap.newBuilder().putFields("key", ObjectMap.Value.newBuilder().setObjectMap(nestedMap).build()).build(); + + // Convert to Java Map + Map map = ObjectMapProtoUtils.fromProto(objectMap); + + // Verify the result + assertNotNull("Map should not be null", map); + assertEquals("Map should have 1 entry", 1, map.size()); + assertTrue("Map should contain the key", map.containsKey("key")); + assertTrue("Value should be a Map", map.get("key") instanceof Map); + + Map nested = (Map) map.get("key"); + assertEquals("Nested map should have 1 entry", 1, nested.size()); + assertTrue("Nested map should contain the key", nested.containsKey("nestedKey")); + assertEquals("Nested value should be a string", "nestedValue", nested.get("nestedKey")); + } + + public void testFromProtoWithNullValueThrowsException() { + // Create an ObjectMap with a null value + ObjectMap objectMap = ObjectMap.newBuilder() + .putFields("key", ObjectMap.Value.newBuilder().setNullValue(NullValue.NULL_VALUE_NULL).build()) + .build(); + + // Attempt to convert to Java Map, should throw UnsupportedOperationException + expectThrows(UnsupportedOperationException.class, () -> ObjectMapProtoUtils.fromProto(objectMap)); + } + + public void testFromProtoWithInvalidValueTypeThrowsException() { + // Create an ObjectMap with an unset value type + ObjectMap objectMap = ObjectMap.newBuilder().putFields("key", ObjectMap.Value.newBuilder().build()).build(); 
+ + // Attempt to convert to Java Map, should throw IllegalArgumentException + expectThrows(IllegalArgumentException.class, () -> ObjectMapProtoUtils.fromProto(objectMap)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtilsTests.java new file mode 100644 index 0000000000000..cc5be18ea9942 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtilsTests.java @@ -0,0 +1,273 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.common; + +import org.opensearch.protobufs.InlineScript; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.protobufs.ScriptLanguage; +import org.opensearch.protobufs.ScriptLanguage.BuiltinScriptLanguage; +import org.opensearch.protobufs.StoredScriptId; +import org.opensearch.script.Script; +import org.opensearch.script.ScriptType; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +public class ScriptProtoUtilsTests extends OpenSearchTestCase { + + public void testParseFromProtoRequestWithInlineScript() { + // Create a protobuf Script with an inline script + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder() + .setInlineScript( + InlineScript.newBuilder() + .setSource("doc['field'].value * 2") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_PAINLESS)) + .build() + ) + .build(); + + // Parse the protobuf Script + Script script = ScriptProtoUtils.parseFromProtoRequest(protoScript); + + // Verify the result + assertNotNull("Script should not be null", script); + assertEquals("Script type should be INLINE", ScriptType.INLINE, script.getType()); + assertEquals("Script language should be painless", "painless", script.getLang()); + assertEquals("Script source should match", "doc['field'].value * 2", script.getIdOrCode()); + assertTrue("Script params should be empty", script.getParams().isEmpty()); + } + + public void testParseFromProtoRequestWithInlineScriptAndCustomLanguage() { + // Create a protobuf Script with an inline script and custom language + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder() + .setInlineScript( + InlineScript.newBuilder() + .setSource("doc['field'].value * 2") + .setLang(ScriptLanguage.newBuilder().setStringValue("custom_lang")) + .build() + ) + .build(); + + // Parse the protobuf Script + Script script = ScriptProtoUtils.parseFromProtoRequest(protoScript); + + // Verify the result + assertNotNull("Script should not be null", script); + assertEquals("Script type should be INLINE", ScriptType.INLINE, script.getType()); + assertEquals("Script language should be custom_lang", "custom_lang", script.getLang()); + assertEquals("Script source should match", "doc['field'].value * 2", script.getIdOrCode()); + assertTrue("Script params should be empty", script.getParams().isEmpty()); + } + + public void testParseFromProtoRequestWithInlineScriptAndParams() { + // Create a protobuf Script with an inline script and parameters + ObjectMap params = 
ObjectMap.newBuilder() + .putFields("factor", ObjectMap.Value.newBuilder().setDouble(2.5).build()) + .putFields("name", ObjectMap.Value.newBuilder().setString("test").build()) + .build(); + + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder() + .setInlineScript( + InlineScript.newBuilder() + .setSource("doc['field'].value * params.factor") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_PAINLESS)) + .setParams(params) + .build() + ) + .build(); + + // Parse the protobuf Script + Script script = ScriptProtoUtils.parseFromProtoRequest(protoScript); + + // Verify the result + assertNotNull("Script should not be null", script); + assertEquals("Script type should be INLINE", ScriptType.INLINE, script.getType()); + assertEquals("Script language should be painless", "painless", script.getLang()); + assertEquals("Script source should match", "doc['field'].value * params.factor", script.getIdOrCode()); + assertEquals("Script params should have 2 entries", 2, script.getParams().size()); + assertEquals("Script param 'factor' should be 2.5", 2.5, script.getParams().get("factor")); + assertEquals("Script param 'name' should be 'test'", "test", script.getParams().get("name")); + } + + public void testParseFromProtoRequestWithInlineScriptAndOptions() { + // Create a protobuf Script with an inline script and options + Map options = new HashMap<>(); + options.put("content_type", "application/json"); + + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder() + .setInlineScript( + InlineScript.newBuilder() + .setSource("doc['field'].value * 2") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_PAINLESS)) + .putAllOptions(options) + .build() + ) + .build(); + + // Parse the protobuf Script + Script script = ScriptProtoUtils.parseFromProtoRequest(protoScript); + + // Verify the result + assertNotNull("Script should not be null", script); + assertEquals("Script type should be INLINE", ScriptType.INLINE, script.getType()); + assertEquals("Script language should be painless", "painless", script.getLang()); + assertEquals("Script source should match", "doc['field'].value * 2", script.getIdOrCode()); + assertEquals("Script options should have 1 entry", 1, script.getOptions().size()); + assertEquals( + "Script option 'content_type' should be 'application/json'", + "application/json", + script.getOptions().get("content_type") + ); + } + + public void testParseFromProtoRequestWithInlineScriptAndInvalidOptions() { + // Create a protobuf Script with an inline script and invalid options + Map options = new HashMap<>(); + options.put("content_type", "application/json"); + options.put("invalid_option", "value"); + + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder() + .setInlineScript( + InlineScript.newBuilder() + .setSource("doc['field'].value * 2") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_PAINLESS)) + .putAllOptions(options) + .build() + ) + .build(); + + // Parse the protobuf Script, should throw IllegalArgumentException + expectThrows(IllegalArgumentException.class, () -> ScriptProtoUtils.parseFromProtoRequest(protoScript)); + } + + public void testParseFromProtoRequestWithStoredScript() { + // Create a protobuf Script with a stored script + org.opensearch.protobufs.Script protoScript = 
org.opensearch.protobufs.Script.newBuilder() + .setStoredScriptId(StoredScriptId.newBuilder().setId("my-stored-script").build()) + .build(); + + // Parse the protobuf Script + Script script = ScriptProtoUtils.parseFromProtoRequest(protoScript); + + // Verify the result + assertNotNull("Script should not be null", script); + assertEquals("Script type should be STORED", ScriptType.STORED, script.getType()); + assertNull("Script language should be null for stored scripts", script.getLang()); + assertEquals("Script id should match", "my-stored-script", script.getIdOrCode()); + assertTrue("Script params should be empty", script.getParams().isEmpty()); + assertNull("Script options should be null for stored scripts", script.getOptions()); + } + + public void testParseFromProtoRequestWithStoredScriptAndParams() { + // Create a protobuf Script with a stored script and parameters + ObjectMap params = ObjectMap.newBuilder() + .putFields("factor", ObjectMap.Value.newBuilder().setDouble(2.5).build()) + .putFields("name", ObjectMap.Value.newBuilder().setString("test").build()) + .build(); + + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder() + .setStoredScriptId(StoredScriptId.newBuilder().setId("my-stored-script").setParams(params).build()) + .build(); + + // Parse the protobuf Script + Script script = ScriptProtoUtils.parseFromProtoRequest(protoScript); + + // Verify the result + assertNotNull("Script should not be null", script); + assertEquals("Script type should be STORED", ScriptType.STORED, script.getType()); + assertNull("Script language should be null for stored scripts", script.getLang()); + assertEquals("Script id should match", "my-stored-script", script.getIdOrCode()); + assertEquals("Script params should have 2 entries", 2, script.getParams().size()); + assertEquals("Script param 'factor' should be 2.5", 2.5, script.getParams().get("factor")); + assertEquals("Script param 'name' should be 'test'", "test", script.getParams().get("name")); + } + + public void testParseFromProtoRequestWithNoScriptType() { + // Create a protobuf Script with no script type + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder().build(); + + // Parse the protobuf Script, should throw UnsupportedOperationException + expectThrows(UnsupportedOperationException.class, () -> ScriptProtoUtils.parseFromProtoRequest(protoScript)); + } + + public void testParseScriptLanguageWithExpressionLanguage() { + // Create a protobuf Script with expression language + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder() + .setInlineScript( + InlineScript.newBuilder() + .setSource("doc['field'].value * 2") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_EXPRESSION)) + .build() + ) + .build(); + + // Parse the protobuf Script + Script script = ScriptProtoUtils.parseFromProtoRequest(protoScript); + + // Verify the result + assertNotNull("Script should not be null", script); + assertEquals("Script language should be expression", "expression", script.getLang()); + } + + public void testParseScriptLanguageWithJavaLanguage() { + // Create a protobuf Script with java language + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder() + .setInlineScript( + InlineScript.newBuilder() + .setSource("doc['field'].value * 2") + 
.setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_JAVA)) + .build() + ) + .build(); + + // Parse the protobuf Script + Script script = ScriptProtoUtils.parseFromProtoRequest(protoScript); + + // Verify the result + assertNotNull("Script should not be null", script); + assertEquals("Script language should be java", "java", script.getLang()); + } + + public void testParseScriptLanguageWithMustacheLanguage() { + // Create a protobuf Script with mustache language + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder() + .setInlineScript( + InlineScript.newBuilder() + .setSource("doc['field'].value * 2") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_MUSTACHE)) + .build() + ) + .build(); + + // Parse the protobuf Script + Script script = ScriptProtoUtils.parseFromProtoRequest(protoScript); + + // Verify the result + assertNotNull("Script should not be null", script); + assertEquals("Script language should be mustache", "mustache", script.getLang()); + } + + public void testParseScriptLanguageWithUnspecifiedLanguage() { + // Create a protobuf Script with unspecified language + org.opensearch.protobufs.Script protoScript = org.opensearch.protobufs.Script.newBuilder() + .setInlineScript( + InlineScript.newBuilder() + .setSource("doc['field'].value * 2") + .setLang( + ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_UNSPECIFIED) + ) + .build() + ) + .build(); + + // Parse the protobuf Script, should throw UnsupportedOperationException + expectThrows(UnsupportedOperationException.class, () -> ScriptProtoUtils.parseFromProtoRequest(protoScript)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtilsTests.java new file mode 100644 index 0000000000000..b0e5c9c28d40e --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtilsTests.java @@ -0,0 +1,122 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; + +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.protobufs.WaitForActiveShards; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.plugin.transport.grpc.proto.request.document.bulk.ActiveShardCountProtoUtils.getActiveShardCount; + +public class ActiveShardCountProtoUtilsTests extends OpenSearchTestCase { + + public void testGetActiveShardCountWithNoWaitForActiveShards() { + // Create a BulkRequest + BulkRequest bulkRequest = new BulkRequest(); + + // Create a protobuf BulkRequest with no wait_for_active_shards + org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder().build(); + + BulkRequest result = getActiveShardCount(bulkRequest, protoRequest); + + // Verify the result + assertSame("Should return the same BulkRequest instance", bulkRequest, result); + assertEquals("Should have default active shard count", ActiveShardCount.DEFAULT, result.waitForActiveShards()); + } + + public void testGetActiveShardCountWithWaitForActiveShardsAll() { + // Create a BulkRequest + BulkRequest bulkRequest = new BulkRequest(); + + // Create a protobuf BulkRequest with wait_for_active_shards = ALL (value 1) + WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder() + .setWaitForActiveShardOptionsValue(1) // WAIT_FOR_ACTIVE_SHARD_OPTIONS_ALL = 1 + .build(); + + org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder() + .setWaitForActiveShards(waitForActiveShards) + .build(); + + BulkRequest result = getActiveShardCount(bulkRequest, protoRequest); + + // Verify the result + assertSame("Should return the same BulkRequest instance", bulkRequest, result); + assertEquals("Should have ALL active shard count", ActiveShardCount.ALL, result.waitForActiveShards()); + } + + public void testGetActiveShardCountWithWaitForActiveShardsDefault() { + // Create a BulkRequest + BulkRequest bulkRequest = new BulkRequest(); + + // Create a protobuf BulkRequest with wait_for_active_shards = DEFAULT (value 2) + WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder() + .setWaitForActiveShardOptionsValue(2) // WAIT_FOR_ACTIVE_SHARD_OPTIONS_DEFAULT = 2 + .build(); + + org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder() + .setWaitForActiveShards(waitForActiveShards) + .build(); + + BulkRequest result = getActiveShardCount(bulkRequest, protoRequest); + + // Verify the result + assertSame("Should return the same BulkRequest instance", bulkRequest, result); + assertEquals("Should have DEFAULT active shard count", ActiveShardCount.DEFAULT, result.waitForActiveShards()); + } + + public void testGetActiveShardCountWithWaitForActiveShardsUnspecified() { + // Create a BulkRequest + BulkRequest bulkRequest = new BulkRequest(); + + // Create a protobuf BulkRequest with wait_for_active_shards = UNSPECIFIED (value 0) + WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder() + .setWaitForActiveShardOptionsValue(0) // WAIT_FOR_ACTIVE_SHARD_OPTIONS_UNSPECIFIED = 0 + .build(); + + org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder() + .setWaitForActiveShards(waitForActiveShards) + .build(); + + expectThrows(UnsupportedOperationException.class, () -> getActiveShardCount(bulkRequest, protoRequest)); + } + + public void 
testGetActiveShardCountWithWaitForActiveShardsInt32() { + // Create a BulkRequest + BulkRequest bulkRequest = new BulkRequest(); + + // Create a protobuf BulkRequest with wait_for_active_shards = 2 + WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder().setInt32Value(2).build(); + + org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder() + .setWaitForActiveShards(waitForActiveShards) + .build(); + + BulkRequest result = getActiveShardCount(bulkRequest, protoRequest); + + // Verify the result + assertSame("Should return the same BulkRequest instance", bulkRequest, result); + assertEquals("Should have active shard count of 2", ActiveShardCount.from(2), result.waitForActiveShards()); + } + + public void testGetActiveShardCountWithWaitForActiveShardsNoCase() { + // Create a BulkRequest + BulkRequest bulkRequest = new BulkRequest(); + + // Create a protobuf BulkRequest with wait_for_active_shards but no case set + WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder().build(); + + org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder() + .setWaitForActiveShards(waitForActiveShards) + .build(); + + // Call getActiveShardCount, should throw UnsupportedOperationException + expectThrows(UnsupportedOperationException.class, () -> getActiveShardCount(bulkRequest, protoRequest)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtilsTests.java new file mode 100644 index 0000000000000..655b845b89ef4 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtilsTests.java @@ -0,0 +1,340 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; + +import com.google.protobuf.ByteString; +import org.opensearch.action.DocWriteRequest; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.update.UpdateRequest; +import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.index.VersionType; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.protobufs.BulkRequest; +import org.opensearch.protobufs.BulkRequestBody; +import org.opensearch.protobufs.CreateOperation; +import org.opensearch.protobufs.DeleteOperation; +import org.opensearch.protobufs.IndexOperation; +import org.opensearch.protobufs.UpdateOperation; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.charset.StandardCharsets; + +import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; + +public class BulkRequestParserProtoUtilsTests extends OpenSearchTestCase { + + public void testBuildCreateRequest() { + // Create a CreateOperation + CreateOperation createOperation = CreateOperation.newBuilder() + .setIndex("test-index") + .setId("test-id") + .setRouting("test-routing") + .setVersion(2) + .setVersionTypeValue(1) // VERSION_TYPE_EXTERNAL = 1 + .setPipeline("test-pipeline") + .setIfSeqNo(3) + .setIfPrimaryTerm(4) + .setRequireAlias(true) + .build(); + + // Create document content + byte[] document = "{\"field\":\"value\"}".getBytes(StandardCharsets.UTF_8); + + // Call buildCreateRequest + IndexRequest indexRequest = BulkRequestParserProtoUtils.buildCreateRequest( + createOperation, + document, + "default-index", + "default-id", + "default-routing", + 1L, + VersionType.INTERNAL, + "default-pipeline", + 1L, + 2L, + false + ); + + // Verify the result + assertNotNull("IndexRequest should not be null", indexRequest); + assertEquals("Index should match", "test-index", indexRequest.index()); + assertEquals("Id should match", "test-id", indexRequest.id()); + assertEquals("Routing should match", "test-routing", indexRequest.routing()); + assertEquals("Version should match", 2L, indexRequest.version()); + assertEquals("VersionType should match", VersionType.EXTERNAL, indexRequest.versionType()); + assertEquals("Pipeline should match", "test-pipeline", indexRequest.getPipeline()); + assertEquals("IfSeqNo should match", 3L, indexRequest.ifSeqNo()); + assertEquals("IfPrimaryTerm should match", 4L, indexRequest.ifPrimaryTerm()); + assertTrue("RequireAlias should match", indexRequest.isRequireAlias()); + assertEquals("Create flag should be true", DocWriteRequest.OpType.CREATE, indexRequest.opType()); + } + + public void testBuildIndexRequest() { + // Create an IndexOperation + IndexOperation indexOperation = IndexOperation.newBuilder() + .setIndex("test-index") + .setId("test-id") + .setRouting("test-routing") + .setVersion(2) + .setVersionTypeValue(2) // VERSION_TYPE_EXTERNAL_GTE = 2 + .setPipeline("test-pipeline") + .setIfSeqNo(3) + .setIfPrimaryTerm(4) + .setRequireAlias(true) + .build(); + + // Create document content + byte[] document = "{\"field\":\"value\"}".getBytes(StandardCharsets.UTF_8); + + // Call buildIndexRequest + IndexRequest indexRequest = BulkRequestParserProtoUtils.buildIndexRequest( + indexOperation, + document, + null, + "default-index", + "default-id", + "default-routing", + 1L, + VersionType.INTERNAL, + "default-pipeline", + 1L, + 2L, + false + ); + + // Verify the result + assertNotNull("IndexRequest should not be null", indexRequest); 
+ assertEquals("Index should match", "test-index", indexRequest.index()); + assertEquals("Id should match", "test-id", indexRequest.id()); + assertEquals("Routing should match", "test-routing", indexRequest.routing()); + assertEquals("Version should match", 2L, indexRequest.version()); + assertEquals("VersionType should match", VersionType.EXTERNAL_GTE, indexRequest.versionType()); + assertEquals("Pipeline should match", "test-pipeline", indexRequest.getPipeline()); + assertEquals("IfSeqNo should match", 3L, indexRequest.ifSeqNo()); + assertEquals("IfPrimaryTerm should match", 4L, indexRequest.ifPrimaryTerm()); + assertTrue("RequireAlias should match", indexRequest.isRequireAlias()); + assertNotEquals("Create flag should be false", DocWriteRequest.OpType.CREATE, indexRequest.opType()); + } + + public void testBuildIndexRequestWithOpType() { + // Create an IndexOperation with OpType + IndexOperation indexOperation = IndexOperation.newBuilder() + .setIndex("test-index") + .setId("test-id") + .setOpType(IndexOperation.OpType.OP_TYPE_CREATE) + .build(); + + // Create document content + byte[] document = "{\"field\":\"value\"}".getBytes(StandardCharsets.UTF_8); + + // Call buildIndexRequest + IndexRequest indexRequest = BulkRequestParserProtoUtils.buildIndexRequest( + indexOperation, + document, + IndexOperation.OpType.OP_TYPE_CREATE, + "default-index", + "default-id", + "default-routing", + Versions.MATCH_ANY, + VersionType.INTERNAL, + "default-pipeline", + SequenceNumbers.UNASSIGNED_SEQ_NO, + UNASSIGNED_PRIMARY_TERM, + false + ); + + // Verify the result + assertNotNull("IndexRequest should not be null", indexRequest); + assertEquals("Index should match", "test-index", indexRequest.index()); + assertEquals("Id should match", "test-id", indexRequest.id()); + assertEquals("Create flag should be true", DocWriteRequest.OpType.CREATE, indexRequest.opType()); + } + + public void testBuildDeleteRequest() { + // Create a DeleteOperation + DeleteOperation deleteOperation = DeleteOperation.newBuilder() + .setIndex("test-index") + .setId("test-id") + .setRouting("test-routing") + .setVersion(2) + .setVersionTypeValue(1) // VERSION_TYPE_EXTERNAL = 1 + .setIfSeqNo(3) + .setIfPrimaryTerm(4) + .build(); + + // Call buildDeleteRequest + DeleteRequest deleteRequest = BulkRequestParserProtoUtils.buildDeleteRequest( + deleteOperation, + "default-index", + "default-id", + "default-routing", + 1L, + VersionType.INTERNAL, + 1L, + 2L + ); + + // Verify the result + assertNotNull("DeleteRequest should not be null", deleteRequest); + assertEquals("Index should match", "test-index", deleteRequest.index()); + assertEquals("Id should match", "test-id", deleteRequest.id()); + assertEquals("Routing should match", "test-routing", deleteRequest.routing()); + assertEquals("Version should match", 2L, deleteRequest.version()); + assertEquals("VersionType should match", VersionType.EXTERNAL, deleteRequest.versionType()); + assertEquals("IfSeqNo should match", 3L, deleteRequest.ifSeqNo()); + assertEquals("IfPrimaryTerm should match", 4L, deleteRequest.ifPrimaryTerm()); + } + + public void testBuildUpdateRequest() { + // Create an UpdateOperation + UpdateOperation updateOperation = UpdateOperation.newBuilder() + .setIndex("test-index") + .setId("test-id") + .setRouting("test-routing") + .setRetryOnConflict(3) + .setIfSeqNo(4) + .setIfPrimaryTerm(5) + .setRequireAlias(true) + .build(); + + // Create document content + byte[] document = "{\"doc\":{\"field\":\"value\"}}".getBytes(StandardCharsets.UTF_8); + + // Create 
BulkRequestBody + BulkRequestBody bulkRequestBody = BulkRequestBody.newBuilder() + .setUpdate(updateOperation) + .setDoc(ByteString.copyFrom(document)) + .setDocAsUpsert(true) + .setDetectNoop(true) + .build(); + + // Call buildUpdateRequest + UpdateRequest updateRequest = BulkRequestParserProtoUtils.buildUpdateRequest( + updateOperation, + document, + bulkRequestBody, + "default-index", + "default-id", + "default-routing", + null, + 1, + "default-pipeline", + 1L, + 2L, + false + ); + + // Verify the result + assertNotNull("UpdateRequest should not be null", updateRequest); + assertEquals("Index should match", "test-index", updateRequest.index()); + assertEquals("Id should match", "test-id", updateRequest.id()); + assertEquals("Routing should match", "test-routing", updateRequest.routing()); + assertEquals("RetryOnConflict should match", 3, updateRequest.retryOnConflict()); + assertEquals("IfSeqNo should match", 4L, updateRequest.ifSeqNo()); + assertEquals("IfPrimaryTerm should match", 5L, updateRequest.ifPrimaryTerm()); + assertTrue("RequireAlias should match", updateRequest.isRequireAlias()); + assertTrue("DocAsUpsert should match", updateRequest.docAsUpsert()); + assertTrue("DetectNoop should match", updateRequest.detectNoop()); + } + + public void testGetDocWriteRequests() { + // Create a BulkRequest with multiple operations + IndexOperation indexOp = IndexOperation.newBuilder().setIndex("test-index").setId("test-id-1").build(); + CreateOperation createOp = CreateOperation.newBuilder().setIndex("test-index").setId("test-id-2").build(); + UpdateOperation updateOp = UpdateOperation.newBuilder().setIndex("test-index").setId("test-id-3").build(); + DeleteOperation deleteOp = DeleteOperation.newBuilder().setIndex("test-index").setId("test-id-4").build(); + + BulkRequestBody indexBody = BulkRequestBody.newBuilder() + .setIndex(indexOp) + .setDoc(ByteString.copyFromUtf8("{\"field\":\"value1\"}")) + .build(); + + BulkRequestBody createBody = BulkRequestBody.newBuilder() + .setCreate(createOp) + .setDoc(ByteString.copyFromUtf8("{\"field\":\"value2\"}")) + .build(); + + BulkRequestBody updateBody = BulkRequestBody.newBuilder() + .setUpdate(updateOp) + .setDoc(ByteString.copyFromUtf8("{\"field\":\"value3\"}")) + .build(); + + BulkRequestBody deleteBody = BulkRequestBody.newBuilder().setDelete(deleteOp).build(); + + BulkRequest request = BulkRequest.newBuilder() + .addRequestBody(indexBody) + .addRequestBody(createBody) + .addRequestBody(updateBody) + .addRequestBody(deleteBody) + .build(); + + // Call getDocWriteRequests + DocWriteRequest[] requests = BulkRequestParserProtoUtils.getDocWriteRequests( + request, + "default-index", + "default-routing", + null, + "default-pipeline", + false + ); + + // Verify the result + assertNotNull("Requests should not be null", requests); + assertEquals("Should have 4 requests", 4, requests.length); + assertTrue("First request should be an IndexRequest", requests[0] instanceof IndexRequest); + assertTrue( + "Second request should be an IndexRequest with create=true", + requests[1] instanceof IndexRequest && ((IndexRequest) requests[1]).opType().equals(DocWriteRequest.OpType.CREATE) + ); + assertTrue("Third request should be an UpdateRequest", requests[2] instanceof UpdateRequest); + assertTrue("Fourth request should be a DeleteRequest", requests[3] instanceof DeleteRequest); + + // Verify the index request + IndexRequest indexRequest = (IndexRequest) requests[0]; + assertEquals("Index should match", "test-index", indexRequest.index()); + assertEquals("Id should 
match", "test-id-1", indexRequest.id()); + + // Verify the create request + IndexRequest createRequest = (IndexRequest) requests[1]; + assertEquals("Index should match", "test-index", createRequest.index()); + assertEquals("Id should match", "test-id-2", createRequest.id()); + assertEquals("Create flag should be true", DocWriteRequest.OpType.CREATE, createRequest.opType()); + + // Verify the update request + UpdateRequest updateRequest = (UpdateRequest) requests[2]; + assertEquals("Index should match", "test-index", updateRequest.index()); + assertEquals("Id should match", "test-id-3", updateRequest.id()); + + // Verify the delete request + DeleteRequest deleteRequest = (DeleteRequest) requests[3]; + assertEquals("Index should match", "test-index", deleteRequest.index()); + assertEquals("Id should match", "test-id-4", deleteRequest.id()); + } + + public void testGetDocWriteRequestsWithInvalidOperation() { + // Create a BulkRequest with an invalid operation (no operation container) + BulkRequestBody invalidBody = BulkRequestBody.newBuilder().build(); + + BulkRequest request = BulkRequest.newBuilder().addRequestBody(invalidBody).build(); + + // Call getDocWriteRequests, should throw IllegalArgumentException + expectThrows( + IllegalArgumentException.class, + () -> BulkRequestParserProtoUtils.getDocWriteRequests( + request, + "default-index", + "default-routing", + null, + "default-pipeline", + false + ) + ); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtilsTests.java new file mode 100644 index 0000000000000..6312b6515ae59 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtilsTests.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; + +import org.opensearch.action.support.WriteRequest; +import org.opensearch.protobufs.BulkRequest; +import org.opensearch.test.OpenSearchTestCase; + +import java.text.ParseException; + +public class BulkRequestProtoUtilsTests extends OpenSearchTestCase { + + public void testGetRefreshPolicyWithRefreshTrue() { + // Create a protobuf BulkRequest with refresh=REFRESH_TRUE (value 1) + BulkRequest request = BulkRequest.newBuilder().setRefresh(BulkRequest.Refresh.REFRESH_TRUE).build(); + + // Call getRefreshPolicy + String refreshPolicy = BulkRequestProtoUtils.getRefreshPolicy(request); + + // Verify the result + assertEquals("Should return IMMEDIATE refresh policy", WriteRequest.RefreshPolicy.IMMEDIATE.getValue(), refreshPolicy); + } + + public void testGetRefreshPolicyWithRefreshWaitFor() { + // Create a protobuf BulkRequest with refresh=REFRESH_WAIT_FOR + BulkRequest request = BulkRequest.newBuilder().setRefresh(BulkRequest.Refresh.REFRESH_WAIT_FOR).build(); + + // Call getRefreshPolicy + String refreshPolicy = BulkRequestProtoUtils.getRefreshPolicy(request); + + // Verify the result + assertEquals("Should return WAIT_UNTIL refresh policy", WriteRequest.RefreshPolicy.WAIT_UNTIL.getValue(), refreshPolicy); + } + + public void testGetRefreshPolicyWithRefreshFalse() { + // Create a protobuf BulkRequest with refresh=REFRESH_FALSE (value 3) + BulkRequest request = BulkRequest.newBuilder().setRefresh(BulkRequest.Refresh.REFRESH_FALSE).build(); + + // Call getRefreshPolicy + String refreshPolicy = BulkRequestProtoUtils.getRefreshPolicy(request); + + // Verify the result + assertEquals("Should return NONE refresh policy", WriteRequest.RefreshPolicy.NONE.getValue(), refreshPolicy); + } + + public void testGetRefreshPolicyWithRefreshUnspecified() { + // Create a protobuf BulkRequest with refresh=REFRESH_UNSPECIFIED (value 0) + BulkRequest request = BulkRequest.newBuilder().setRefresh(BulkRequest.Refresh.REFRESH_UNSPECIFIED).build(); + + // Call getRefreshPolicy + String refreshPolicy = BulkRequestProtoUtils.getRefreshPolicy(request); + + // Verify the result + assertEquals("Should return NONE refresh policy", WriteRequest.RefreshPolicy.NONE.getValue(), refreshPolicy); + } + + public void testGetRefreshPolicyWithNoRefresh() { + // Create a protobuf BulkRequest with no refresh value + BulkRequest request = BulkRequest.newBuilder().build(); + + // Call getRefreshPolicy + String refreshPolicy = BulkRequestProtoUtils.getRefreshPolicy(request); + + // Verify the result + assertNull("Should return null refresh policy", refreshPolicy); + } + + public void testPrepareRequestWithBasicSettings() { + // Create a protobuf BulkRequest with basic settings + BulkRequest request = BulkRequest.newBuilder() + .setIndex("test-index") + .setRouting("test-routing") + .setRefresh(BulkRequest.Refresh.REFRESH_TRUE) + .setTimeout("30s") + .build(); + + // Call prepareRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); + + // Verify the result + assertNotNull("BulkRequest should not be null", bulkRequest); + assertEquals("Refresh policy should match", WriteRequest.RefreshPolicy.IMMEDIATE, bulkRequest.getRefreshPolicy()); + } + + public void testPrepareRequestWithDefaultValues() { + // Create a protobuf BulkRequest with no specific settings + BulkRequest request = BulkRequest.newBuilder().build(); + + // Call prepareRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = 
BulkRequestProtoUtils.prepareRequest(request); + + // Verify the result + assertNotNull("BulkRequest should not be null", bulkRequest); + assertEquals("Should have zero requests", 0, bulkRequest.numberOfActions()); + assertEquals("Refresh policy should be null", WriteRequest.RefreshPolicy.NONE, bulkRequest.getRefreshPolicy()); + } + + public void testPrepareRequestWithTimeout() throws ParseException { + // Create a protobuf BulkRequest with a timeout + BulkRequest request = BulkRequest.newBuilder().setTimeout("5s").build(); + + // Call prepareRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); + + // Verify the result + assertNotNull("BulkRequest should not be null", bulkRequest); + // The timeout is set in the BulkRequest + assertEquals("Require alias should be true", "5s", bulkRequest.timeout().toString()); + + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/BulkResponseProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/BulkResponseProtoUtilsTests.java new file mode 100644 index 0000000000000..80cb4c3be34da --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/BulkResponseProtoUtilsTests.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response; + +import org.opensearch.action.DocWriteRequest; +import org.opensearch.action.bulk.BulkItemResponse; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.plugin.transport.grpc.proto.response.document.bulk.BulkResponseProtoUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class BulkResponseProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithSuccessfulResponse() throws IOException { + // Create a successful BulkResponse + BulkItemResponse[] responses = new BulkItemResponse[1]; + Index index = new Index("test-index", "_na_"); + ShardId shardId = new ShardId(index, 1); + IndexResponse indexResponse = new IndexResponse(shardId, "test-id", 1, 1, 1, true); + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(); + indexResponse.setShardInfo(shardInfo); + responses[0] = new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, indexResponse); + + BulkResponse bulkResponse = new BulkResponse(responses, 100); + + // Convert to Protocol Buffer + org.opensearch.protobufs.BulkResponse protoResponse = BulkResponseProtoUtils.toProto(bulkResponse); + + // Verify the conversion + assertEquals("Should have the correct took time", 100, protoResponse.getBulkResponseBody().getTook()); + assertFalse("Should not have errors", protoResponse.getBulkResponseBody().getErrors()); + assertEquals("Should have 1 item", 1, protoResponse.getBulkResponseBody().getItemsCount()); + + // Verify the item response + org.opensearch.protobufs.Item item = protoResponse.getBulkResponseBody().getItems(0); + org.opensearch.protobufs.ResponseItem responseItem = item.getIndex(); + assertEquals("Should have the 
correct index", "test-index", responseItem.getIndex()); + assertEquals("Should have the correct id", "test-id", responseItem.getId().getString()); + assertEquals("Should have the correct status", 201, responseItem.getStatus()); + } + + public void testToProtoWithFailedResponse() throws IOException { + // Create a failed BulkResponse + BulkItemResponse[] responses = new BulkItemResponse[1]; + Exception exception = new Exception("Test failure"); + responses[0] = new BulkItemResponse( + 0, + DocWriteRequest.OpType.INDEX, + new BulkItemResponse.Failure("test-index", "test-id", exception) + ); + + BulkResponse bulkResponse = new BulkResponse(responses, 100); + + // Convert to Protocol Buffer + org.opensearch.protobufs.BulkResponse protoResponse = BulkResponseProtoUtils.toProto(bulkResponse); + + // Verify the conversion + assertEquals("Should have the correct took time", 100, protoResponse.getBulkResponseBody().getTook()); + assertTrue("Should have errors", protoResponse.getBulkResponseBody().getErrors()); + assertEquals("Should have 1 item", 1, protoResponse.getBulkResponseBody().getItemsCount()); + + // Verify the item response + org.opensearch.protobufs.Item item = protoResponse.getBulkResponseBody().getItems(0); + org.opensearch.protobufs.ResponseItem responseItem = item.getIndex(); + assertEquals("Should have the correct index", "test-index", responseItem.getIndex()); + assertEquals("Should have the correct id", "test-id", responseItem.getId().getString()); + assertTrue("Should have error", responseItem.getError().getReason().length() > 0); + } + + public void testToProtoWithIngestTook() throws IOException { + // Create a BulkResponse with ingest took time + BulkItemResponse[] responses = new BulkItemResponse[1]; + Index index = new Index("test-index", "_na_"); + ShardId shardId = new ShardId(index, 1); + IndexResponse indexResponse = new IndexResponse(shardId, "test-id", 1, 1, 1, true); + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(); + indexResponse.setShardInfo(shardInfo); + responses[0] = new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, indexResponse); + + // Set ingest took time to 50ms + BulkResponse bulkResponse = new BulkResponse(responses, 100, 50); + + // Convert to Protocol Buffer + org.opensearch.protobufs.BulkResponse protoResponse = BulkResponseProtoUtils.toProto(bulkResponse); + + // Verify the conversion + assertEquals("Should have the correct took time", 100, protoResponse.getBulkResponseBody().getTook()); + assertEquals("Should have the correct ingest took time", 50, protoResponse.getBulkResponseBody().getIngestTook()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/ObjectMapProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/ObjectMapProtoUtilsTests.java new file mode 100644 index 0000000000000..3d4000225ad4e --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/ObjectMapProtoUtilsTests.java @@ -0,0 +1,267 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.common; + +import org.opensearch.protobufs.NullValue; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ObjectMapProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithNull() { + // Convert null to Protocol Buffer + ObjectMap.Value value = ObjectMapProtoUtils.toProto(null); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have null value", value.hasNullValue()); + assertEquals("Null value should be NULL_VALUE_NULL", NullValue.NULL_VALUE_NULL, value.getNullValue()); + } + + public void testToProtoWithInteger() { + // Convert Integer to Protocol Buffer + Integer intValue = 42; + ObjectMap.Value value = ObjectMapProtoUtils.toProto(intValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have int32 value", value.hasInt32()); + assertEquals("Int32 value should match", intValue.intValue(), value.getInt32()); + } + + public void testToProtoWithLong() { + // Convert Long to Protocol Buffer + Long longValue = 9223372036854775807L; + ObjectMap.Value value = ObjectMapProtoUtils.toProto(longValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have int64 value", value.hasInt64()); + assertEquals("Int64 value should match", longValue.longValue(), value.getInt64()); + } + + public void testToProtoWithDouble() { + // Convert Double to Protocol Buffer + Double doubleValue = 3.14159; + ObjectMap.Value value = ObjectMapProtoUtils.toProto(doubleValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have double value", value.hasDouble()); + assertEquals("Double value should match", doubleValue, value.getDouble(), 0.0); + } + + public void testToProtoWithFloat() { + // Convert Float to Protocol Buffer + Float floatValue = 2.71828f; + ObjectMap.Value value = ObjectMapProtoUtils.toProto(floatValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have float value", value.hasFloat()); + assertEquals("Float value should match", floatValue, value.getFloat(), 0.0f); + } + + public void testToProtoWithString() { + // Convert String to Protocol Buffer + String stringValue = "test string"; + ObjectMap.Value value = ObjectMapProtoUtils.toProto(stringValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have string value", value.hasString()); + assertEquals("String value should match", stringValue, value.getString()); + } + + public void testToProtoWithBoolean() { + // Convert Boolean to Protocol Buffer + Boolean boolValue = true; + ObjectMap.Value value = ObjectMapProtoUtils.toProto(boolValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have bool value", value.hasBool()); + assertEquals("Bool value should match", boolValue, value.getBool()); + } + + public void testToProtoWithEnum() { + // Convert Enum to Protocol Buffer + TestEnum enumValue = TestEnum.VALUE_2; + ObjectMap.Value value = ObjectMapProtoUtils.toProto(enumValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have string value", value.hasString()); + assertEquals("String value 
should match enum name", enumValue.toString(), value.getString()); + } + + public void testToProtoWithList() { + // Convert List to Protocol Buffer + List listValue = Arrays.asList("string", 42, true); + ObjectMap.Value value = ObjectMapProtoUtils.toProto(listValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have list value", value.hasListValue()); + assertEquals("List should have correct size", 3, value.getListValue().getValueCount()); + + // Verify list elements + assertTrue("First element should be string", value.getListValue().getValue(0).hasString()); + assertEquals("First element should match", "string", value.getListValue().getValue(0).getString()); + + assertTrue("Second element should be int32", value.getListValue().getValue(1).hasInt32()); + assertEquals("Second element should match", 42, value.getListValue().getValue(1).getInt32()); + + assertTrue("Third element should be bool", value.getListValue().getValue(2).hasBool()); + assertEquals("Third element should match", true, value.getListValue().getValue(2).getBool()); + } + + public void testToProtoWithEmptyList() { + // Convert empty List to Protocol Buffer + List listValue = Arrays.asList(); + ObjectMap.Value value = ObjectMapProtoUtils.toProto(listValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have list value", value.hasListValue()); + assertEquals("List should be empty", 0, value.getListValue().getValueCount()); + } + + public void testToProtoWithMap() { + // Convert Map to Protocol Buffer + Map mapValue = new HashMap<>(); + mapValue.put("string", "value"); + mapValue.put("int", 42); + mapValue.put("bool", true); + + ObjectMap.Value value = ObjectMapProtoUtils.toProto(mapValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have object map", value.hasObjectMap()); + assertEquals("Map should have correct size", 3, value.getObjectMap().getFieldsCount()); + + // Verify map entries + assertTrue("String entry should exist", value.getObjectMap().containsFields("string")); + assertTrue("String entry should be string", value.getObjectMap().getFieldsOrThrow("string").hasString()); + assertEquals("String entry should match", "value", value.getObjectMap().getFieldsOrThrow("string").getString()); + + assertTrue("Int entry should exist", value.getObjectMap().containsFields("int")); + assertTrue("Int entry should be int32", value.getObjectMap().getFieldsOrThrow("int").hasInt32()); + assertEquals("Int entry should match", 42, value.getObjectMap().getFieldsOrThrow("int").getInt32()); + + assertTrue("Bool entry should exist", value.getObjectMap().containsFields("bool")); + assertTrue("Bool entry should be bool", value.getObjectMap().getFieldsOrThrow("bool").hasBool()); + assertEquals("Bool entry should match", true, value.getObjectMap().getFieldsOrThrow("bool").getBool()); + } + + public void testToProtoWithEmptyMap() { + // Convert empty Map to Protocol Buffer + Map mapValue = new HashMap<>(); + ObjectMap.Value value = ObjectMapProtoUtils.toProto(mapValue); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have object map", value.hasObjectMap()); + assertEquals("Map should be empty", 0, value.getObjectMap().getFieldsCount()); + } + + public void testToProtoWithNestedStructures() { + // Create a nested structure + Map innerMap = new HashMap<>(); + innerMap.put("key", "value"); + + List innerList = Arrays.asList(1, 
2, 3); + + Map outerMap = new HashMap<>(); + outerMap.put("map", innerMap); + outerMap.put("list", innerList); + + // Convert to Protocol Buffer + ObjectMap.Value value = ObjectMapProtoUtils.toProto(outerMap); + + // Verify the conversion + assertNotNull("Value should not be null", value); + assertTrue("Should have object map", value.hasObjectMap()); + assertEquals("Map should have correct size", 2, value.getObjectMap().getFieldsCount()); + + // Verify nested map + assertTrue("Nested map should exist", value.getObjectMap().containsFields("map")); + assertTrue("Nested map should be object map", value.getObjectMap().getFieldsOrThrow("map").hasObjectMap()); + assertEquals( + "Nested map should have correct size", + 1, + value.getObjectMap().getFieldsOrThrow("map").getObjectMap().getFieldsCount() + ); + assertTrue("Nested map key should exist", value.getObjectMap().getFieldsOrThrow("map").getObjectMap().containsFields("key")); + assertEquals( + "Nested map value should match", + "value", + value.getObjectMap().getFieldsOrThrow("map").getObjectMap().getFieldsOrThrow("key").getString() + ); + + // Verify nested list + assertTrue("Nested list should exist", value.getObjectMap().containsFields("list")); + assertTrue("Nested list should be list value", value.getObjectMap().getFieldsOrThrow("list").hasListValue()); + assertEquals( + "Nested list should have correct size", + 3, + value.getObjectMap().getFieldsOrThrow("list").getListValue().getValueCount() + ); + assertEquals( + "Nested list first element should match", + 1, + value.getObjectMap().getFieldsOrThrow("list").getListValue().getValue(0).getInt32() + ); + assertEquals( + "Nested list second element should match", + 2, + value.getObjectMap().getFieldsOrThrow("list").getListValue().getValue(1).getInt32() + ); + assertEquals( + "Nested list third element should match", + 3, + value.getObjectMap().getFieldsOrThrow("list").getListValue().getValue(2).getInt32() + ); + } + + public void testToProtoWithUnsupportedType() { + // Create an unsupported type (a custom class) + UnsupportedType unsupportedValue = new UnsupportedType(); + + // Attempt to convert to Protocol Buffer, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> ObjectMapProtoUtils.toProto(unsupportedValue) + ); + + // Verify the exception message contains the object's toString + assertTrue("Exception message should contain object's toString", exception.getMessage().contains(unsupportedValue.toString())); + } + + // Helper enum for testing + private enum TestEnum { + VALUE_1, + VALUE_2, + VALUE_3 + } + + // Helper class for testing unsupported types + private static class UnsupportedType { + @Override + public String toString() { + return "UnsupportedType"; + } + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..135fd0d094e1b --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtilsTests.java @@ -0,0 +1,436 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.common; + +import org.opensearch.OpenSearchException; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.common.breaker.ResponseLimitBreachedException; +import org.opensearch.common.breaker.ResponseLimitSettings; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.protobufs.ErrorCause; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.protobufs.StringOrStringArray; +import org.opensearch.script.ScriptException; +import org.opensearch.search.SearchParseException; +import org.opensearch.search.aggregations.MultiBucketConsumerService; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class OpenSearchExceptionProtoUtilsTests extends OpenSearchTestCase { + private static final String TEST_NODE_ID = "test_node_id"; + + public void testToProtoWithOpenSearchException() throws IOException { + // Create an OpenSearchException + OpenSearchException exception = new OpenSearchException("Test exception"); + + // Convert to Protocol Buffer + ErrorCause errorCause = OpenSearchExceptionProtoUtils.toProto(exception); + + // Verify the conversion + // The actual type format uses underscores instead of dots + assertEquals("Should have the correct type", "exception", errorCause.getType()); + assertEquals("Should have the correct reason", "Test exception", errorCause.getReason()); + assertTrue("Should have a stack trace", errorCause.getStackTrace().length() > 0); + assertFalse("Should not have suppressed exceptions", errorCause.getSuppressedList().iterator().hasNext()); + assertFalse("Should not have a cause", errorCause.hasCausedBy()); + } + + public void testToProtoWithNestedOpenSearchException() throws IOException { + // Create a nested OpenSearchException + IOException cause = new IOException("Cause exception"); + OpenSearchException exception = new OpenSearchException("Test exception", cause); + + // Convert to Protocol Buffer + ErrorCause errorCause = OpenSearchExceptionProtoUtils.toProto(exception); + + // Verify the conversion + // The actual type format uses underscores instead of dots + assertEquals("Should have the correct type", "exception", errorCause.getType()); + assertEquals("Should have the correct reason", "Test exception", errorCause.getReason()); + assertTrue("Should have a stack trace", errorCause.getStackTrace().length() > 0); + + // Verify the cause + assertTrue("Should have a cause", errorCause.hasCausedBy()); + ErrorCause causedBy = errorCause.getCausedBy(); + // The actual type format uses underscores instead of dots + assertEquals("Cause should have the correct type", "i_o_exception", causedBy.getType()); + assertEquals("Cause should have the correct reason", "Cause exception", causedBy.getReason()); + } + + public void testGenerateThrowableProtoWithOpenSearchException() throws IOException { + // Create an OpenSearchException + OpenSearchException exception = new OpenSearchException("Test exception"); + + // Convert to Protocol Buffer + ErrorCause errorCause = OpenSearchExceptionProtoUtils.generateThrowableProto(exception); + + // Verify the 
conversion + // The actual type format uses underscores instead of dots + assertEquals("Should have the correct type", "exception", errorCause.getType()); + assertEquals("Should have the correct reason", "Test exception", errorCause.getReason()); + } + + public void testGenerateThrowableProtoWithIOException() throws IOException { + // Create an IOException + IOException exception = new IOException("Test IO exception"); + + // Convert to Protocol Buffer + ErrorCause errorCause = OpenSearchExceptionProtoUtils.generateThrowableProto(exception); + + // Verify the conversion + // The actual type format uses underscores instead of dots + assertEquals("Should have the correct type", "i_o_exception", errorCause.getType()); + assertEquals("Should have the correct reason", "Test IO exception", errorCause.getReason()); + } + + public void testGenerateThrowableProtoWithRuntimeException() throws IOException { + // Create a RuntimeException + RuntimeException exception = new RuntimeException("Test runtime exception"); + + // Convert to Protocol Buffer + ErrorCause errorCause = OpenSearchExceptionProtoUtils.generateThrowableProto(exception); + + // Verify the conversion + // The actual type format uses underscores instead of dots + assertEquals("Should have the correct type", "runtime_exception", errorCause.getType()); + assertEquals("Should have the correct reason", "Test runtime exception", errorCause.getReason()); + } + + public void testGenerateThrowableProtoWithNullMessage() throws IOException { + // Create an exception with null message + RuntimeException exception = new RuntimeException((String) null); + + // Convert to Protocol Buffer + ErrorCause errorCause = OpenSearchExceptionProtoUtils.generateThrowableProto(exception); + + // Verify the conversion + // The actual type format uses underscores instead of dots + assertEquals("Should have the correct type", "runtime_exception", errorCause.getType()); + assertFalse("Should not have a reason", errorCause.hasReason()); + } + + public void testGenerateThrowableProtoWithSuppressedExceptions() throws IOException { + // Create an exception with suppressed exceptions + RuntimeException exception = new RuntimeException("Main exception"); + exception.addSuppressed(new IllegalArgumentException("Suppressed exception")); + + // Convert to Protocol Buffer + ErrorCause errorCause = OpenSearchExceptionProtoUtils.generateThrowableProto(exception); + + // Verify the conversion + // The actual type format uses underscores instead of dots + assertEquals("Should have the correct type", "runtime_exception", errorCause.getType()); + assertEquals("Should have the correct reason", "Main exception", errorCause.getReason()); + + // Verify suppressed exceptions + assertEquals("Should have one suppressed exception", 1, errorCause.getSuppressedCount()); + ErrorCause suppressed = errorCause.getSuppressed(0); + // The actual type format uses underscores instead of dots + assertEquals("Suppressed should have the correct type", "illegal_argument_exception", suppressed.getType()); + assertEquals("Suppressed should have the correct reason", "Suppressed exception", suppressed.getReason()); + } + + public void testInnerToProtoWithBasicException() throws IOException { + // Create a basic exception + RuntimeException exception = new RuntimeException("Test exception"); + + // Convert to Protocol Buffer using the protected method via reflection + ErrorCause errorCause = OpenSearchExceptionProtoUtils.generateThrowableProto(exception); + + // Verify the conversion + // The actual type 
format uses underscores instead of dots + assertEquals("Should have the correct type", "runtime_exception", errorCause.getType()); + assertEquals("Should have the correct reason", "Test exception", errorCause.getReason()); + assertTrue("Should have a stack trace", errorCause.getStackTrace().length() > 0); + } + + public void testHeaderToProtoWithSingleValue() throws IOException { + // Create a header with a single value + String key = "test-header"; + List values = Collections.singletonList("test-value"); + + // Convert to Protocol Buffer + Map.Entry entry = OpenSearchExceptionProtoUtils.headerToProto(key, values); + + // Verify the conversion + assertNotNull("Entry should not be null", entry); + assertEquals("Key should match", key, entry.getKey()); + assertTrue("Should be a string value", entry.getValue().hasStringValue()); + assertEquals("Value should match", "test-value", entry.getValue().getStringValue()); + assertFalse("Should not have a string array", entry.getValue().hasStringArray()); + } + + public void testHeaderToProtoWithMultipleValues() throws IOException { + // Create a header with multiple values + String key = "test-header"; + List values = Arrays.asList("value1", "value2", "value3"); + + // Convert to Protocol Buffer + Map.Entry entry = OpenSearchExceptionProtoUtils.headerToProto(key, values); + + // Verify the conversion + assertNotNull("Entry should not be null", entry); + assertEquals("Key should match", key, entry.getKey()); + assertFalse("Should not be a string value", entry.getValue().hasStringValue()); + assertTrue("Should have a string array", entry.getValue().hasStringArray()); + assertEquals("Array should have correct size", 3, entry.getValue().getStringArray().getStringArrayCount()); + assertEquals("First value should match", "value1", entry.getValue().getStringArray().getStringArray(0)); + assertEquals("Second value should match", "value2", entry.getValue().getStringArray().getStringArray(1)); + assertEquals("Third value should match", "value3", entry.getValue().getStringArray().getStringArray(2)); + } + + public void testHeaderToProtoWithEmptyValues() throws IOException { + // Create a header with empty values + String key = "test-header"; + List values = Collections.emptyList(); + + // Convert to Protocol Buffer + Map.Entry entry = OpenSearchExceptionProtoUtils.headerToProto(key, values); + + // Verify the conversion + assertNull("Entry should be null for empty values", entry); + } + + public void testHeaderToProtoWithNullValues() throws IOException { + // Create a header with null values + String key = "test-header"; + List values = null; + + // Convert to Protocol Buffer + Map.Entry entry = OpenSearchExceptionProtoUtils.headerToProto(key, values); + + // Verify the conversion + assertNull("Entry should be null for null values", entry); + } + + public void testHeaderToValueProtoWithSingleValue() throws IOException { + // Create a header with a single value + String key = "test-header"; + List values = Collections.singletonList("test-value"); + + // Convert to Protocol Buffer + Map.Entry entry = OpenSearchExceptionProtoUtils.headerToValueProto(key, values); + + // Verify the conversion + assertNotNull("Entry should not be null", entry); + assertEquals("Key should match", key, entry.getKey()); + assertTrue("Should be a string value", entry.getValue().hasString()); + assertEquals("Value should match", "test-value", entry.getValue().getString()); + assertFalse("Should not have a list value", entry.getValue().hasListValue()); + } + + public void 
testHeaderToValueProtoWithMultipleValues() throws IOException { + // Create a header with multiple values + String key = "test-header"; + List values = Arrays.asList("value1", "value2", "value3"); + + // Convert to Protocol Buffer + Map.Entry entry = OpenSearchExceptionProtoUtils.headerToValueProto(key, values); + + // Verify the conversion + assertNotNull("Entry should not be null", entry); + assertEquals("Key should match", key, entry.getKey()); + assertFalse("Should not be a string value", entry.getValue().hasString()); + assertTrue("Should have a list value", entry.getValue().hasListValue()); + assertEquals("List should have correct size", 3, entry.getValue().getListValue().getValueCount()); + assertEquals("First value should match", "value1", entry.getValue().getListValue().getValue(0).getString()); + assertEquals("Second value should match", "value2", entry.getValue().getListValue().getValue(1).getString()); + assertEquals("Third value should match", "value3", entry.getValue().getListValue().getValue(2).getString()); + } + + public void testHeaderToValueProtoWithEmptyValues() throws IOException { + // Create a header with empty values + String key = "test-header"; + List values = Collections.emptyList(); + + // Convert to Protocol Buffer + Map.Entry entry = OpenSearchExceptionProtoUtils.headerToValueProto(key, values); + + // Verify the conversion + assertNull("Entry should be null for empty values", entry); + } + + public void testHeaderToValueProtoWithNullValues() throws IOException { + // Create a header with null values + String key = "test-header"; + List values = null; + + // Convert to Protocol Buffer + Map.Entry entry = OpenSearchExceptionProtoUtils.headerToValueProto(key, values); + + // Verify the conversion + assertNull("Entry should be null for null values", entry); + } + + public void testMetadataToProtoWithCircuitBreakingException() { + // Create a CircuitBreakingException with bytes wanted and bytes limit + CircuitBreakingException exception = new CircuitBreakingException("Test circuit breaking", 1000L, 500L, null); + + // Convert to Protocol Buffer + Map metadata = OpenSearchExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertNotNull("Metadata should not be null", metadata); + assertTrue("Should have bytes_wanted field", metadata.containsKey("bytes_wanted")); + assertEquals("bytes_wanted should match", 1000L, metadata.get("bytes_wanted").getInt64()); + assertTrue("Should have bytes_limit field", metadata.containsKey("bytes_limit")); + assertEquals("bytes_limit should match", 500L, metadata.get("bytes_limit").getInt64()); + // Note: Durability is not in the constructor in newer versions + } + + public void testMetadataToProtoWithFailedNodeException() { + // Create a FailedNodeException + FailedNodeException exception = new FailedNodeException(TEST_NODE_ID, "Test failed node", new IOException("IO error")); + + // Convert to Protocol Buffer + Map metadata = OpenSearchExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertNotNull("Metadata should not be null", metadata); + assertTrue("Should have node_id field", metadata.containsKey("node_id")); + assertEquals("node_id should match", TEST_NODE_ID, metadata.get("node_id").getString()); + } + + public void testMetadataToProtoWithParsingException() { + // Create a ParsingException with line and column numbers + // Using a mock since we can't directly set line and column numbers + ParsingException exception = mock(ParsingException.class); + 
when(exception.getMessage()).thenReturn("Test parsing exception"); + when(exception.getLineNumber()).thenReturn(10); + when(exception.getColumnNumber()).thenReturn(20); + + // Convert to Protocol Buffer + Map metadata = OpenSearchExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertNotNull("Metadata should not be null", metadata); + assertTrue("Should have line field", metadata.containsKey("line")); + assertEquals("line should match", 10, metadata.get("line").getInt32()); + assertTrue("Should have col field", metadata.containsKey("col")); + assertEquals("col should match", 20, metadata.get("col").getInt32()); + } + + public void testMetadataToProtoWithResponseLimitBreachedException() { + // Create a ResponseLimitBreachedException + ResponseLimitBreachedException exception = new ResponseLimitBreachedException( + "Test response limit", + 1000, + ResponseLimitSettings.LimitEntity.INDICES + ); + + // Convert to Protocol Buffer + Map metadata = OpenSearchExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertNotNull("Metadata should not be null", metadata); + assertTrue("Should have response_limit field", metadata.containsKey("response_limit")); + assertEquals("response_limit should match", 1000L, metadata.get("response_limit").getInt32()); + assertTrue("Should have limit_entity field", metadata.containsKey("limit_entity")); + assertEquals("limit_entity should match", "INDICES", metadata.get("limit_entity").getString()); + } + + public void testMetadataToProtoWithScriptException() { + // Create a ScriptException + ScriptException exception = new ScriptException( + "Test script exception", + new Exception("Script error"), + Arrays.asList("line1", "line2"), + "test_script", + "painless" + ); + + // Convert to Protocol Buffer + Map metadata = OpenSearchExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertNotNull("Metadata should not be null", metadata); + assertTrue("Should have script_stack field", metadata.containsKey("script_stack")); + assertTrue("Should have script field", metadata.containsKey("script")); + assertEquals("script should match", "test_script", metadata.get("script").getString()); + assertTrue("Should have lang field", metadata.containsKey("lang")); + assertEquals("lang should match", "painless", metadata.get("lang").getString()); + } + + public void testMetadataToProtoWithSearchParseException() { + // Create a SearchParseException with line and column numbers + // Using a mock since we can't directly set line and column numbers + SearchParseException exception = mock(SearchParseException.class); + when(exception.getMessage()).thenReturn("Test search parse exception"); + when(exception.getLineNumber()).thenReturn(10); + when(exception.getColumnNumber()).thenReturn(20); + + // Convert to Protocol Buffer + Map metadata = OpenSearchExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertNotNull("Metadata should not be null", metadata); + assertTrue("Should have line field", metadata.containsKey("line")); + assertEquals("line should match", 10, metadata.get("line").getInt32()); + assertTrue("Should have col field", metadata.containsKey("col")); + assertEquals("col should match", 20, metadata.get("col").getInt32()); + } + + public void testMetadataToProtoWithSearchPhaseExecutionException() { + // Create a SearchPhaseExecutionException + SearchPhaseExecutionException exception = new SearchPhaseExecutionException( + "test_phase", + "Test search phase execution", + 
ShardSearchFailure.EMPTY_ARRAY + ); + + // Convert to Protocol Buffer + Map metadata = OpenSearchExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertNotNull("Metadata should not be null", metadata); + assertTrue("Should have phase field", metadata.containsKey("phase")); + assertEquals("phase should match", "test_phase", metadata.get("phase").getString()); + assertTrue("Should have grouped field", metadata.containsKey("grouped")); + assertTrue("grouped should be true", metadata.get("grouped").getBool()); + } + + public void testMetadataToProtoWithTooManyBucketsException() { + // Create a TooManyBucketsException + MultiBucketConsumerService.TooManyBucketsException exception = new MultiBucketConsumerService.TooManyBucketsException( + "Test too many buckets", + 1000 + ); + + // Convert to Protocol Buffer + Map metadata = OpenSearchExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertNotNull("Metadata should not be null", metadata); + assertTrue("Should have max_buckets field", metadata.containsKey("max_buckets")); + assertEquals("max_buckets should match", 1000, metadata.get("max_buckets").getInt32()); + } + + public void testMetadataToProtoWithGenericOpenSearchException() { + // Create a generic OpenSearchException + OpenSearchException exception = new OpenSearchException("Test generic exception"); + + // Convert to Protocol Buffer + Map metadata = OpenSearchExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertNotNull("Metadata should not be null", metadata); + assertTrue("Metadata should be empty for generic exception", metadata.isEmpty()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/StructProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/StructProtoUtilsTests.java new file mode 100644 index 0000000000000..1663d239ff3e7 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/StructProtoUtilsTests.java @@ -0,0 +1,177 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.common; + +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class StructProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithNull() { + // Convert null to protobuf Value + Value value = StructProtoUtils.toProto(null); + + // Verify the result + assertNotNull("Value should not be null", value); + assertTrue("Value should have null_value set", value.hasNullValue()); + assertEquals("Null value should be NULL_VALUE", NullValue.NULL_VALUE, value.getNullValue()); + } + + public void testToProtoWithInteger() { + // Convert Integer to protobuf Value + Integer intValue = 42; + Value value = StructProtoUtils.toProto(intValue); + + // Verify the result + assertNotNull("Value should not be null", value); + assertTrue("Value should have number_value set", value.hasNumberValue()); + assertEquals("Number value should match", 42.0, value.getNumberValue(), 0.0); + } + + public void testToProtoWithDouble() { + // Convert Double to protobuf Value + Double doubleValue = 3.14159; + Value value = StructProtoUtils.toProto(doubleValue); + + // Verify the result + assertNotNull("Value should not be null", value); + assertTrue("Value should have number_value set", value.hasNumberValue()); + assertEquals("Number value should match", 3.14159, value.getNumberValue(), 0.0); + } + + public void testToProtoWithString() { + // Convert String to protobuf Value + String stringValue = "Hello, World!"; + Value value = StructProtoUtils.toProto(stringValue); + + // Verify the result + assertNotNull("Value should not be null", value); + assertTrue("Value should have string_value set", value.hasStringValue()); + assertEquals("String value should match", "Hello, World!", value.getStringValue()); + } + + public void testToProtoWithBoolean() { + // Convert Boolean to protobuf Value + Boolean boolValue = true; + Value value = StructProtoUtils.toProto(boolValue); + + // Verify the result + assertNotNull("Value should not be null", value); + assertTrue("Value should have bool_value set", value.hasBoolValue()); + assertEquals("Boolean value should match", true, value.getBoolValue()); + } + + public void testToProtoWithList() { + // Create a list with mixed types + List list = new ArrayList<>(); + list.add("string"); + list.add(42); + list.add(true); + list.add(null); + + // Convert List to protobuf Value + Value value = StructProtoUtils.toProto(list); + + // Verify the result + assertNotNull("Value should not be null", value); + assertTrue("Value should have list_value set", value.hasListValue()); + + ListValue listValue = value.getListValue(); + assertEquals("List should have 4 elements", 4, listValue.getValuesCount()); + + // Verify each element + assertTrue("First element should be a string", listValue.getValues(0).hasStringValue()); + assertEquals("First element should match", "string", listValue.getValues(0).getStringValue()); + + assertTrue("Second element should be a number", listValue.getValues(1).hasNumberValue()); + assertEquals("Second element should match", 42.0, listValue.getValues(1).getNumberValue(), 0.0); + + assertTrue("Third element should be a boolean", listValue.getValues(2).hasBoolValue()); + assertEquals("Third element should match", true, 
listValue.getValues(2).getBoolValue()); + + assertTrue("Fourth element should be null", listValue.getValues(3).hasNullValue()); + assertEquals("Fourth element should be NULL_VALUE", NullValue.NULL_VALUE, listValue.getValues(3).getNullValue()); + } + + public void testToProtoWithMap() { + // Create a map with mixed types + Map map = new HashMap<>(); + map.put("string", "value"); + map.put("number", 42); + map.put("boolean", true); + map.put("null", null); + + // Convert Map to protobuf Value + Value value = StructProtoUtils.toProto(map); + + // Verify the result + assertNotNull("Value should not be null", value); + assertTrue("Value should have struct_value set", value.hasStructValue()); + + Struct struct = value.getStructValue(); + assertEquals("Struct should have 4 fields", 4, struct.getFieldsCount()); + + // Verify each field + assertTrue("string field should be a string", struct.getFieldsOrThrow("string").hasStringValue()); + assertEquals("string field should match", "value", struct.getFieldsOrThrow("string").getStringValue()); + + assertTrue("number field should be a number", struct.getFieldsOrThrow("number").hasNumberValue()); + assertEquals("number field should match", 42.0, struct.getFieldsOrThrow("number").getNumberValue(), 0.0); + + assertTrue("boolean field should be a boolean", struct.getFieldsOrThrow("boolean").hasBoolValue()); + assertEquals("boolean field should match", true, struct.getFieldsOrThrow("boolean").getBoolValue()); + + assertTrue("null field should be null", struct.getFieldsOrThrow("null").hasNullValue()); + assertEquals("null field should be NULL_VALUE", NullValue.NULL_VALUE, struct.getFieldsOrThrow("null").getNullValue()); + } + + public void testToProtoWithNestedMap() { + // Create a nested map + Map nestedMap = new HashMap<>(); + nestedMap.put("nestedKey", "nestedValue"); + + Map map = new HashMap<>(); + map.put("nested", nestedMap); + + // Convert Map to protobuf Value + Value value = StructProtoUtils.toProto(map); + + // Verify the result + assertNotNull("Value should not be null", value); + assertTrue("Value should have struct_value set", value.hasStructValue()); + + Struct struct = value.getStructValue(); + assertEquals("Struct should have 1 field", 1, struct.getFieldsCount()); + + // Verify nested field + assertTrue("nested field should be a struct", struct.getFieldsOrThrow("nested").hasStructValue()); + + Struct nestedStruct = struct.getFieldsOrThrow("nested").getStructValue(); + assertEquals("Nested struct should have 1 field", 1, nestedStruct.getFieldsCount()); + assertTrue("nestedKey field should be a string", nestedStruct.getFieldsOrThrow("nestedKey").hasStringValue()); + assertEquals("nestedKey field should match", "nestedValue", nestedStruct.getFieldsOrThrow("nestedKey").getStringValue()); + } + + public void testToProtoWithUnsupportedType() { + // Create an unsupported type (e.g., a custom class) + class CustomClass {} + CustomClass customObject = new CustomClass(); + + // Attempt to convert to protobuf Value, should throw IllegalArgumentException + expectThrows(IllegalArgumentException.class, () -> StructProtoUtils.toProto(customObject)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtilsTests.java new file mode 100644 index 0000000000000..fcf2021600229 --- /dev/null +++ 
b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtilsTests.java @@ -0,0 +1,220 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.document.bulk; + +import org.opensearch.action.DocWriteRequest; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.bulk.BulkItemResponse; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.get.GetResult; +import org.opensearch.protobufs.Item; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class BulkItemResponseProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithIndexResponse() throws IOException { + // Create a ShardId + ShardId shardId = new ShardId("test-index", "test-uuid", 0); + + // Create a ShardInfo with no failures + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3, new ReplicationResponse.ShardInfo.Failure[0]); + + // Create an IndexResponse + IndexResponse indexResponse = new IndexResponse(shardId, "test-id", 1, 2, 3, true); + indexResponse.setShardInfo(shardInfo); + + // Create a BulkItemResponse with the IndexResponse + BulkItemResponse bulkItemResponse = new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, indexResponse); + + // Convert to protobuf Item + Item item = BulkItemResponseProtoUtils.toProto(bulkItemResponse); + + // Verify the result + assertNotNull("Item should not be null", item); + assertTrue("Item should have index field set", item.hasIndex()); + assertEquals("Index should match", "test-index", item.getIndex().getIndex()); + assertEquals("Id should match", "test-id", item.getIndex().getId().getString()); + assertEquals("Version should match", indexResponse.getVersion(), item.getIndex().getVersion()); + assertEquals("Result should match", DocWriteResponse.Result.CREATED.getLowercase(), item.getIndex().getResult()); + } + + public void testToProtoWithCreateResponse() throws IOException { + // Create a ShardId + ShardId shardId = new ShardId("test-index", "test-uuid", 0); + + // Create a ShardInfo with no failures + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3, new ReplicationResponse.ShardInfo.Failure[0]); + + // Create an IndexResponse + IndexResponse indexResponse = new IndexResponse(shardId, "test-id", 1, 2, 3, true); + indexResponse.setShardInfo(shardInfo); + + // Create a BulkItemResponse with the IndexResponse and CREATE op type + BulkItemResponse bulkItemResponse = new BulkItemResponse(0, DocWriteRequest.OpType.CREATE, indexResponse); + + // Convert to protobuf Item + Item item = BulkItemResponseProtoUtils.toProto(bulkItemResponse); + + // Verify the result + assertNotNull("Item should not be null", item); + assertTrue("Item should have create field set", 
item.hasCreate()); + assertEquals("Index should match", "test-index", item.getCreate().getIndex()); + assertEquals("Id should match", "test-id", item.getCreate().getId().getString()); + assertEquals("Version should match", indexResponse.getVersion(), item.getCreate().getVersion()); + assertEquals("Result should match", DocWriteResponse.Result.CREATED.getLowercase(), item.getCreate().getResult()); + } + + public void testToProtoWithDeleteResponse() throws IOException { + // Create a ShardId + ShardId shardId = new ShardId("test-index", "test-uuid", 0); + + // Create a ShardInfo with no failures + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3, new ReplicationResponse.ShardInfo.Failure[0]); + + // Create a DeleteResponse + DeleteResponse deleteResponse = new DeleteResponse(shardId, "test-id", 1, 2, 3, true); + deleteResponse.setShardInfo(shardInfo); + + // Create a BulkItemResponse with the DeleteResponse + BulkItemResponse bulkItemResponse = new BulkItemResponse(0, DocWriteRequest.OpType.DELETE, deleteResponse); + + // Convert to protobuf Item + Item item = BulkItemResponseProtoUtils.toProto(bulkItemResponse); + + // Verify the result + assertNotNull("Item should not be null", item); + assertTrue("Item should have delete field set", item.hasDelete()); + assertEquals("Index should match", "test-index", item.getDelete().getIndex()); + assertEquals("Id should match", "test-id", item.getDelete().getId().getString()); + assertEquals("Version should match", deleteResponse.getVersion(), item.getDelete().getVersion()); + assertEquals("Result should match", DocWriteResponse.Result.DELETED.getLowercase(), item.getDelete().getResult()); + } + + public void testToProtoWithUpdateResponse() throws IOException { + // Create a ShardId + ShardId shardId = new ShardId("test-index", "test-uuid", 0); + + // Create a ShardInfo with no failures + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3, new ReplicationResponse.ShardInfo.Failure[0]); + + // Create an UpdateResponse + UpdateResponse updateResponse = new UpdateResponse(shardId, "test-id", 1, 2, 3, DocWriteResponse.Result.UPDATED); + updateResponse.setShardInfo(shardInfo); + + // Create a BulkItemResponse with the UpdateResponse + BulkItemResponse bulkItemResponse = new BulkItemResponse(0, DocWriteRequest.OpType.UPDATE, updateResponse); + + // Convert to protobuf Item + Item item = BulkItemResponseProtoUtils.toProto(bulkItemResponse); + + // Verify the result + assertNotNull("Item should not be null", item); + assertTrue("Item should have update field set", item.hasUpdate()); + assertEquals("Index should match", "test-index", item.getUpdate().getIndex()); + assertEquals("Id should match", "test-id", item.getUpdate().getId().getString()); + assertEquals("Version should match", updateResponse.getVersion(), item.getUpdate().getVersion()); + assertEquals("Result should match", DocWriteResponse.Result.UPDATED.getLowercase(), item.getUpdate().getResult()); + } + + public void testToProtoWithUpdateResponseAndGetResult() throws IOException { + // Create a ShardId + ShardId shardId = new ShardId("test-index", "test-uuid", 0); + + // Create a ShardInfo with no failures + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3, new ReplicationResponse.ShardInfo.Failure[0]); + + // Create a GetResult + Map sourceMap = new HashMap<>(); + sourceMap.put("field1", new DocumentField("field1", List.of("value1"))); + sourceMap.put("field2", new DocumentField("field1", List.of(42))); + + 
GetResult getResult = new GetResult( + "test-index", + "test-id", + 0, + 1, + 1, + true, + new BytesArray("{\"field1\":\"value1\",\"field2\":42}".getBytes(StandardCharsets.UTF_8)), + sourceMap, + null + ); + + // Create an UpdateResponse with GetResult + UpdateResponse updateResponse = new UpdateResponse(shardId, "test-id", 1, 2, 3, DocWriteResponse.Result.UPDATED); + updateResponse.setShardInfo(shardInfo); + updateResponse.setGetResult(getResult); + + // Create a BulkItemResponse with the UpdateResponse + BulkItemResponse bulkItemResponse = new BulkItemResponse(0, DocWriteRequest.OpType.UPDATE, updateResponse); + + // Convert to protobuf Item + Item item = BulkItemResponseProtoUtils.toProto(bulkItemResponse); + + // Verify the result + assertNotNull("Item should not be null", item); + assertTrue("Item should have update field set", item.hasUpdate()); + assertEquals("Index should match", "test-index", item.getUpdate().getIndex()); + assertEquals("Id should match", "test-id", item.getUpdate().getId().getString()); + assertEquals("Version should match", 1, item.getUpdate().getVersion()); + assertEquals("Result should match", DocWriteResponse.Result.UPDATED.getLowercase(), item.getUpdate().getResult()); + + // Verify GetResult fields + assertTrue("Get field should be set", item.getUpdate().hasGet()); + assertEquals("Get index should match", "test-index", item.getUpdate().getIndex()); + assertEquals("Get id should match", "test-id", item.getUpdate().getId().getString()); + assertTrue("Get found should be true", item.getUpdate().getGet().getFound()); + } + + public void testToProtoWithFailure() throws IOException { + // Create a failure + Exception exception = new IOException("Test IO exception"); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure( + "test-index", + "test-id", + exception, + RestStatus.INTERNAL_SERVER_ERROR + ); + + // Create a BulkItemResponse with the failure + BulkItemResponse bulkItemResponse = new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, failure); + + // Convert to protobuf Item + Item item = BulkItemResponseProtoUtils.toProto(bulkItemResponse); + + // Verify the result + assertNotNull("Item should not be null", item); + assertTrue("Item should have index field set", item.hasIndex()); + assertEquals("Index should match", "test-index", item.getIndex().getIndex()); + assertEquals("Id should match", "test-id", item.getIndex().getId().getString()); + assertEquals("Status should match", RestStatus.INTERNAL_SERVER_ERROR.getStatus(), item.getIndex().getStatus()); + + // Verify error + assertTrue("Error should be set", item.getIndex().hasError()); + assertTrue("Error reason should contain exception message", item.getIndex().getError().getReason().contains("Test IO exception")); + } + + public void testToProtoWithNullResponse() throws IOException { + // Call toProto with null, should throw NullPointerException + expectThrows(NullPointerException.class, () -> BulkItemResponseProtoUtils.toProto(null)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocWriteResponseProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocWriteResponseProtoUtilsTests.java new file mode 100644 index 0000000000000..7c53996034a63 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocWriteResponseProtoUtilsTests.java @@ -0,0 +1,114 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.document.common; + +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.protobufs.ResponseItem; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DocWriteResponseProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithIndexResponse() throws IOException { + // Create a ShardId + ShardId shardId = new ShardId("test-index", "test-uuid", 0); + + // Create a ShardInfo with no failures + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3, new ReplicationResponse.ShardInfo.Failure[0]); + + // Create an IndexResponse + IndexResponse indexResponse = new IndexResponse(shardId, "test-id", 1, 2, 3, true); + indexResponse.setShardInfo(shardInfo); + indexResponse.setForcedRefresh(true); + + // Convert to protobuf ResponseItem.Builder + ResponseItem.Builder responseItemBuilder = DocWriteResponseProtoUtils.toProto(indexResponse); + + // Verify the result + assertNotNull("ResponseItem.Builder should not be null", responseItemBuilder); + + // Build the ResponseItem to verify its contents + ResponseItem responseItem = responseItemBuilder.build(); + + // Verify basic fields + assertEquals("Index should match", "test-index", responseItem.getIndex()); + assertEquals("Id should match", "test-id", responseItem.getId().getString()); + assertEquals("Version should match", indexResponse.getVersion(), responseItem.getVersion()); + assertEquals("Result should match", DocWriteResponse.Result.CREATED.getLowercase(), responseItem.getResult()); + assertTrue("ForcedRefresh should be true", responseItem.getForcedRefresh()); + + // Verify sequence number and primary term + assertEquals("SeqNo should match", indexResponse.getSeqNo(), responseItem.getSeqNo()); + assertEquals("PrimaryTerm should match", indexResponse.getPrimaryTerm(), responseItem.getPrimaryTerm()); + + // Verify ShardInfo + assertNotNull("ShardInfo should not be null", responseItem.getShards()); + assertEquals("Total shards should match", 5, responseItem.getShards().getTotal()); + assertEquals("Successful shards should match", 3, responseItem.getShards().getSuccessful()); + assertEquals("Failed shards should match", indexResponse.getShardInfo().getFailed(), responseItem.getShards().getFailed()); + } + + public void testToProtoWithEmptyId() throws IOException { + // Create a ShardId + ShardId shardId = new ShardId("test-index", "test-uuid", 0); + + // Create a ShardInfo with no failures + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3, new ReplicationResponse.ShardInfo.Failure[0]); + + // Create an IndexResponse with empty ID + IndexResponse indexResponse = new IndexResponse(shardId, "", 1, 2, 3, true); + indexResponse.setShardInfo(shardInfo); + + // Convert to protobuf ResponseItem.Builder + ResponseItem.Builder responseItemBuilder = DocWriteResponseProtoUtils.toProto(indexResponse); + + // Verify the result + assertNotNull("ResponseItem.Builder should not be null", responseItemBuilder); + + // Build the ResponseItem to verify its contents + ResponseItem responseItem = 
responseItemBuilder.build(); + + // Verify ID is set to null value + assertTrue("Id should be null value", responseItem.getId().hasNullValue()); + } + + public void testToProtoWithNoSeqNo() throws IOException { + // Create a ShardId + ShardId shardId = new ShardId("test-index", "test-uuid", 0); + + // Create a ShardInfo with no failures + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3, new ReplicationResponse.ShardInfo.Failure[0]); + + // Create an IndexResponse with negative sequence number (unassigned) + IndexResponse indexResponse = new IndexResponse(shardId, "test-id", -1, 1, 3, true); + indexResponse.setShardInfo(shardInfo); + + // Convert to protobuf ResponseItem.Builder + ResponseItem.Builder responseItemBuilder = DocWriteResponseProtoUtils.toProto(indexResponse); + + // Verify the result + assertNotNull("ResponseItem.Builder should not be null", responseItemBuilder); + + // Build the ResponseItem to verify its contents + ResponseItem responseItem = responseItemBuilder.build(); + + // Verify sequence number and primary term are not set + assertFalse("SeqNo should not be set", responseItem.hasSeqNo()); + assertFalse("PrimaryTerm should not be set", responseItem.hasPrimaryTerm()); + } + + public void testToProtoWithNullResponse() throws IOException { + // Call toProto with null, should throw NullPointerException + expectThrows(NullPointerException.class, () -> DocWriteResponseProtoUtils.toProto(null)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtilsTests.java new file mode 100644 index 0000000000000..f4ed9256b2f9a --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtilsTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.document.common; + +import com.google.protobuf.Value; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class DocumentFieldProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithEmptyList() { + // Create an empty list of field values + List fieldValues = Collections.emptyList(); + + // Convert to Protocol Buffer + Value value = DocumentFieldProtoUtils.toProto(fieldValues); + + // Verify the conversion + assertNotNull("Value should not be null", value); + } + + public void testToProtoWithSimpleValues() { + // Create a list of field values + List fieldValues = Arrays.asList("value1", "value2"); + + // Convert to Protocol Buffer + Value value = DocumentFieldProtoUtils.toProto(fieldValues); + + // Verify the conversion + assertNotNull("Value should not be null", value); + + // Note: The current implementation might return a default value because the implementation + // is not yet completed. This test will need to be updated once the implementation is complete. 
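+        // Illustrative sketch only (an assumption, not asserted here): if DocumentFieldProtoUtils is
+        // completed to mirror StructProtoUtils, a two-element string list would likely convert to a
+        // protobuf ListValue, roughly verifiable as:
+        //   assertTrue(value.hasListValue());
+        //   assertEquals(2, value.getListValue().getValuesCount());
+        //   assertEquals("value1", value.getListValue().getValues(0).getStringValue());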
+ } + + public void testToProtoWithNullList() { + // Convert null to Protocol Buffer + Value value = DocumentFieldProtoUtils.toProto(null); + + // Verify the conversion + assertNotNull("Value should not be null", value); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtilsTests.java new file mode 100644 index 0000000000000..0af62dd40ba38 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtilsTests.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.document.common; + +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.protobufs.ShardInfo; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class ShardInfoProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithNoFailures() throws IOException { + // Create a ShardInfo with no failures + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3, new ReplicationResponse.ShardInfo.Failure[0]); + + // Convert to protobuf ShardInfo + ShardInfo protoShardInfo = ShardInfoProtoUtils.toProto(shardInfo); + + // Verify the result + assertNotNull("ShardInfo should not be null", protoShardInfo); + assertEquals("Total should match", 5, protoShardInfo.getTotal()); + assertEquals("Successful should match", 3, protoShardInfo.getSuccessful()); + assertEquals("Failed should match", shardInfo.getFailed(), protoShardInfo.getFailed()); + assertEquals("Failures list should be empty", 0, protoShardInfo.getFailuresCount()); + } + + public void testToProtoWithFailures() throws IOException { + // Create failures + List failuresList = new ArrayList<>(); + ShardId shardId = new ShardId(new Index("index1", "1"), 1); + ShardId shardId2 = new ShardId(new Index("index2", "2"), 2); + + // Add a failure with an exception + Exception exception1 = new IOException("Test IO exception"); + failuresList.add(new ReplicationResponse.ShardInfo.Failure(shardId, "node0", exception1, RestStatus.INTERNAL_SERVER_ERROR, true)); + + // Add another failure with a different exception + Exception exception2 = new IllegalArgumentException("Test argument exception"); + failuresList.add(new ReplicationResponse.ShardInfo.Failure(shardId2, "node1", exception2, RestStatus.BAD_REQUEST, false)); + + // Create a ShardInfo with failures + ReplicationResponse.ShardInfo.Failure[] failures = failuresList.toArray(new ReplicationResponse.ShardInfo.Failure[0]); + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3, failures); + + // Convert to protobuf ShardInfo + ShardInfo protoShardInfo = ShardInfoProtoUtils.toProto(shardInfo); + + // Verify the result + assertNotNull("ShardInfo should not be null", protoShardInfo); + assertEquals("Total should match", 5, protoShardInfo.getTotal()); + assertEquals("Successful should match", 
3, protoShardInfo.getSuccessful()); + assertEquals("Failed should match", 2, protoShardInfo.getFailed()); + assertEquals("Failures list should have 2 entries", 2, protoShardInfo.getFailuresCount()); + + // Verify first failure + assertEquals("First failure index should match", "index1", protoShardInfo.getFailures(0).getIndex()); + assertEquals("First failure shard should match", 1, protoShardInfo.getFailures(0).getShard()); + assertTrue( + "First failure reason should contain exception message", + protoShardInfo.getFailures(0).getReason().getReason().contains("Test IO exception") + ); + assertEquals("First failure status should match", "INTERNAL_SERVER_ERROR", protoShardInfo.getFailures(0).getStatus()); + assertTrue("First failure primary flag should be true", protoShardInfo.getFailures(0).getPrimary()); + + // Verify second failure + assertEquals("Second failure index should match", "index2", protoShardInfo.getFailures(1).getIndex()); + assertEquals("Second failure shard should match", 2, protoShardInfo.getFailures(1).getShard()); + assertTrue( + "Second failure reason should contain exception message", + protoShardInfo.getFailures(1).getReason().getReason().contains("Test argument exception") + ); + assertEquals("Second failure status should match", "BAD_REQUEST", protoShardInfo.getFailures(1).getStatus()); + assertFalse("Second failure primary flag should be false", protoShardInfo.getFailures(1).getPrimary()); + } + + public void testToProtoWithNullShardInfo() throws IOException { + // Call toProto with null, should throw NullPointerException + expectThrows(NullPointerException.class, () -> ShardInfoProtoUtils.toProto(null)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtilsTests.java new file mode 100644 index 0000000000000..52e342c9d7c08 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtilsTests.java @@ -0,0 +1,172 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.document.get; + +import com.google.protobuf.ByteString; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.index.get.GetResult; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.protobufs.InlineGetDictUserDefined; +import org.opensearch.protobufs.ResponseItem; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; + +public class GetResultProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithExistingDocument() throws IOException { + // Create a GetResult for an existing document + String index = "test-index"; + String id = "test-id"; + long version = 1; + long seqNo = 2; + long primaryTerm = 3; + byte[] sourceBytes = "{\"field\":\"value\"}".getBytes(StandardCharsets.UTF_8); + BytesReference source = new BytesArray(sourceBytes); + + GetResult getResult = new GetResult( + index, + id, + seqNo, + primaryTerm, + version, + true, + source, + Collections.emptyMap(), + Collections.emptyMap() + ); + + // Convert to Protocol Buffer + ResponseItem.Builder responseItemBuilder = ResponseItem.newBuilder(); + ResponseItem.Builder result = GetResultProtoUtils.toProto(getResult, responseItemBuilder); + + // Verify the conversion + assertEquals("Should have the correct index", index, result.getIndex()); + assertEquals("Should have the correct id", id, result.getId().getString()); + assertEquals("Should have the correct version", version, result.getVersion()); + + InlineGetDictUserDefined get = result.getGet(); + assertTrue("Should be found", get.getFound()); + assertEquals("Should have the correct sequence number", seqNo, get.getSeqNo()); + assertEquals("Should have the correct primary term", primaryTerm, get.getPrimaryTerm()); + assertEquals("Should have the correct source", ByteString.copyFrom(sourceBytes), get.getSource()); + } + + public void testToProtoWithNonExistingDocument() throws IOException { + // Create a GetResult for a non-existing document + String index = "test-index"; + String id = "test-id"; + + GetResult getResult = new GetResult( + index, + id, + SequenceNumbers.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_PRIMARY_TERM, + -1, + false, + null, + Collections.emptyMap(), + Collections.emptyMap() + ); + + // Convert to Protocol Buffer + ResponseItem.Builder responseItemBuilder = ResponseItem.newBuilder(); + ResponseItem.Builder result = GetResultProtoUtils.toProto(getResult, responseItemBuilder); + + // Verify the conversion + assertEquals("Should have the correct index", index, result.getIndex()); + assertEquals("Should have the correct id", id, result.getId().getString()); + assertFalse("Should not be found", result.getGet().getFound()); + } + + public void testToProtoEmbeddedWithSequenceNumber() throws IOException { + // Create a GetResult with sequence number and primary term + String index = "test-index"; + String id = "test-id"; + long seqNo = 2; + long primaryTerm = 3; + byte[] sourceBytes = "{\"field\":\"value\"}".getBytes(StandardCharsets.UTF_8); + BytesReference source = new BytesArray(sourceBytes); + + GetResult getResult = new GetResult(index, id, seqNo, primaryTerm, 1, true, source, Collections.emptyMap(), Collections.emptyMap()); + + // Convert to Protocol Buffer + InlineGetDictUserDefined.Builder builder = InlineGetDictUserDefined.newBuilder(); + InlineGetDictUserDefined.Builder 
result = GetResultProtoUtils.toProtoEmbedded(getResult, builder); + + // Verify the conversion + assertTrue("Should be found", result.getFound()); + assertEquals("Should have the correct sequence number", seqNo, result.getSeqNo()); + assertEquals("Should have the correct primary term", primaryTerm, result.getPrimaryTerm()); + assertEquals("Should have the correct source", ByteString.copyFrom(sourceBytes), result.getSource()); + } + + public void testToProtoEmbeddedWithoutSequenceNumber() throws IOException { + // Create a GetResult without sequence number and primary term + String index = "test-index"; + String id = "test-id"; + byte[] sourceBytes = "{\"field\":\"value\"}".getBytes(StandardCharsets.UTF_8); + BytesReference source = new BytesArray(sourceBytes); + + GetResult getResult = new GetResult( + index, + id, + SequenceNumbers.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_PRIMARY_TERM, + 1, + true, + source, + Collections.emptyMap(), + Collections.emptyMap() + ); + + // Convert to Protocol Buffer + InlineGetDictUserDefined.Builder builder = InlineGetDictUserDefined.newBuilder(); + InlineGetDictUserDefined.Builder result = GetResultProtoUtils.toProtoEmbedded(getResult, builder); + + // Verify the conversion + assertTrue("Should be found", result.getFound()); + assertEquals("Should have the correct source", ByteString.copyFrom(source.toBytesRef().bytes), result.getSource()); + + // Sequence number and primary term should not be set + assertFalse("Should not have sequence number", result.hasSeqNo()); + assertFalse("Should not have primary term", result.hasPrimaryTerm()); + } + + public void testToProtoEmbeddedWithoutSource() throws IOException { + // Create a GetResult without source + String index = "test-index"; + String id = "test-id"; + + GetResult getResult = new GetResult( + index, + id, + SequenceNumbers.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_PRIMARY_TERM, + 1, + true, + null, + Collections.emptyMap(), + Collections.emptyMap() + ); + + // Convert to Protocol Buffer + InlineGetDictUserDefined.Builder builder = InlineGetDictUserDefined.newBuilder(); + InlineGetDictUserDefined.Builder result = GetResultProtoUtils.toProtoEmbedded(getResult, builder); + + // Verify the conversion + assertTrue("Should be found", result.getFound()); + + // Source should not be set + assertFalse("Should not have source", result.hasSource()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/CircuitBreakingExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/CircuitBreakingExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..2d165d479ef88 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/CircuitBreakingExceptionProtoUtilsTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.core.common.breaker.CircuitBreaker.Durability; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class CircuitBreakingExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testMetadataToProto() { + // Create a CircuitBreakingException with specific values + long bytesWanted = 1024; + long byteLimit = 512; + Durability durability = Durability.TRANSIENT; + CircuitBreakingException exception = new CircuitBreakingException("Test circuit breaking", bytesWanted, byteLimit, durability); + + // Convert to Protocol Buffer + Map metadata = CircuitBreakingExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertTrue("Should have bytes_wanted field", metadata.containsKey("bytes_wanted")); + assertTrue("Should have bytes_limit field", metadata.containsKey("bytes_limit")); + assertTrue("Should have durability field", metadata.containsKey("durability")); + + // Verify field values + ObjectMap.Value bytesWantedValue = metadata.get("bytes_wanted"); + ObjectMap.Value bytesLimitValue = metadata.get("bytes_limit"); + ObjectMap.Value durabilityValue = metadata.get("durability"); + + assertEquals("bytes_wanted should match", bytesWanted, bytesWantedValue.getInt64()); + assertEquals("bytes_limit should match", byteLimit, bytesLimitValue.getInt64()); + assertEquals("durability should match", durability.toString(), durabilityValue.getString()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/FailedNodeExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/FailedNodeExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..592944f6d5a02 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/FailedNodeExceptionProtoUtilsTests.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class FailedNodeExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testMetadataToProto() { + // Create a FailedNodeException with a specific node ID + String nodeId = "test_node_id"; + FailedNodeException exception = new FailedNodeException(nodeId, "Test failed node", new RuntimeException("Cause")); + + // Convert to Protocol Buffer + Map metadata = FailedNodeExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertTrue("Should have node_id field", metadata.containsKey("node_id")); + + // Verify field value + ObjectMap.Value nodeIdValue = metadata.get("node_id"); + assertEquals("node_id should match", nodeId, nodeIdValue.getString()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ParsingExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ParsingExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..f359bdf50da95 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ParsingExceptionProtoUtilsTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.core.common.ParsingException; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +import static org.opensearch.core.common.ParsingException.UNKNOWN_POSITION; + +public class ParsingExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testMetadataToProtoWithPositionInfo() { + // Create a ParsingException with line and column information + int lineNumber = 42; + int columnNumber = 10; + ParsingException exception = new ParsingException(lineNumber, columnNumber, "Test parsing error", null); + + // Convert to Protocol Buffer + Map metadata = ParsingExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertTrue("Should have line field", metadata.containsKey("line")); + assertTrue("Should have col field", metadata.containsKey("col")); + + // Verify field values + ObjectMap.Value lineValue = metadata.get("line"); + ObjectMap.Value colValue = metadata.get("col"); + + assertEquals("line should match", lineNumber, lineValue.getInt32()); + assertEquals("col should match", columnNumber, colValue.getInt32()); + } + + public void testMetadataToProtoWithUnknownPosition() { + // Create a ParsingException with unknown position + ParsingException exception = new ParsingException(UNKNOWN_POSITION, UNKNOWN_POSITION, "Test parsing error", null); + + // Convert to Protocol Buffer + Map metadata = ParsingExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion - should be empty since position is unknown + assertFalse("Should have line field", metadata.containsKey("line")); + assertFalse("Should have col field", metadata.containsKey("col")); + } +} diff --git 
a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ResponseLimitBreachedExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ResponseLimitBreachedExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..26af97448c6f1 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ResponseLimitBreachedExceptionProtoUtilsTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.common.breaker.ResponseLimitBreachedException; +import org.opensearch.common.breaker.ResponseLimitSettings; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class ResponseLimitBreachedExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testMetadataToProto() { + // Create a ResponseLimitBreachedException with specific values + int responseLimit = 10000; // Smaller value that fits in an int + ResponseLimitSettings.LimitEntity limitEntity = ResponseLimitSettings.LimitEntity.INDICES; + ResponseLimitBreachedException exception = new ResponseLimitBreachedException( + "Test response limit breached", + responseLimit, + limitEntity + ); + + // Convert to Protocol Buffer + Map metadata = ResponseLimitBreachedExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertTrue("Should have response_limit field", metadata.containsKey("response_limit")); + assertTrue("Should have limit_entity field", metadata.containsKey("limit_entity")); + + // Verify field values + ObjectMap.Value responseLimitValue = metadata.get("response_limit"); + ObjectMap.Value limitEntityValue = metadata.get("limit_entity"); + + assertEquals("response_limit should match", responseLimit, responseLimitValue.getInt32()); + assertEquals("limit_entity should match", limitEntity.toString(), limitEntityValue.getString()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ScriptExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ScriptExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..82c670989ce42 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ScriptExceptionProtoUtilsTests.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.script.ScriptException; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ScriptExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testMetadataToProtoWithoutPosition() { + // Create a ScriptException without position information + String script = "doc['field'].value > 100"; + String lang = "painless"; + List stack = new ArrayList<>(); + stack.add("line 1: error"); + stack.add("line 2: another error"); + + ScriptException exception = new ScriptException("Test script error", new RuntimeException("Test cause"), stack, script, lang); + + // Convert to Protocol Buffer + Map metadata = ScriptExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertTrue("Should have script_stack field", metadata.containsKey("script_stack")); + assertTrue("Should have script field", metadata.containsKey("script")); + assertTrue("Should have lang field", metadata.containsKey("lang")); + assertFalse("Should have position field", metadata.containsKey("position")); + + // Verify field values + ObjectMap.Value scriptValue = metadata.get("script"); + ObjectMap.Value langValue = metadata.get("lang"); + + assertEquals("script should match", script, scriptValue.getString()); + assertEquals("lang should match", lang, langValue.getString()); + + // Verify script stack + ObjectMap.Value stackValue = metadata.get("script_stack"); + assertEquals("script_stack should have 2 items", 2, stackValue.getListValue().getValueCount()); + assertEquals("First stack item should match", stack.get(0), stackValue.getListValue().getValue(0).getString()); + assertEquals("Second stack item should match", stack.get(1), stackValue.getListValue().getValue(1).getString()); + } + + public void testMetadataToProtoWithPosition() { + // Create a ScriptException with position information + String script = "doc['field'].value > 100"; + String lang = "painless"; + List stack = new ArrayList<>(); + stack.add("line 1: error"); + + ScriptException.Position pos = new ScriptException.Position(10, 5, 15); + ScriptException exception = new ScriptException("Test script error", new RuntimeException("Test cause"), stack, script, lang, pos); + + // Convert to Protocol Buffer + Map metadata = ScriptExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertTrue("Should have position field", metadata.containsKey("position")); + + // Verify position field + ObjectMap.Value positionValue = metadata.get("position"); + Map positionMap = positionValue.getObjectMap().getFieldsMap(); + + assertTrue("Position should have offset field", positionMap.containsKey("offset")); + assertTrue("Position should have start field", positionMap.containsKey("start")); + assertTrue("Position should have end field", positionMap.containsKey("end")); + + assertEquals("offset should match", 10, positionMap.get("offset").getInt32()); + assertEquals("start should match", 5, positionMap.get("start").getInt32()); + assertEquals("end should match", 15, positionMap.get("end").getInt32()); + } + + public void testToProtoBuilderMethod() { + // Test the toProto method that takes a builder and position + ScriptException.Position pos = new ScriptException.Position(10, 5, 15); + + // Create a builder and add position + Map map = new HashMap<>(); + Map result = 
ScriptExceptionProtoUtils.toProto(map, pos); + + // Verify the result + assertTrue("Should have position field", result.containsKey("position")); + + // Verify position field + ObjectMap.Value positionValue = result.get("position"); + Map positionMap = positionValue.getObjectMap().getFieldsMap(); + + assertEquals("offset should match", 10, positionMap.get("offset").getInt32()); + assertEquals("start should match", 5, positionMap.get("start").getInt32()); + assertEquals("end should match", 15, positionMap.get("end").getInt32()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchParseExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchParseExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..40f5d9da1f7a1 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchParseExceptionProtoUtilsTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.search.SearchParseException; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Map; + +public class SearchParseExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testMetadataToProto() throws IOException { + // Create a mock SearchParseException with specific line and column information + int lineNumber = 25; + int columnNumber = 15; + + // Create a mock SearchParseException that only provides the line and column numbers + SearchParseException exception = new MockSearchParseException(lineNumber, columnNumber); + + // Convert to Protocol Buffer + Map metadata = SearchParseExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertTrue("Should have line field", metadata.containsKey("line")); + assertTrue("Should have col field", metadata.containsKey("col")); + + // Verify field values + ObjectMap.Value lineValue = metadata.get("line"); + ObjectMap.Value colValue = metadata.get("col"); + + assertEquals("line should match", lineNumber, lineValue.getInt32()); + assertEquals("col should match", columnNumber, colValue.getInt32()); + } + + /** + * A simple mock implementation of SearchParseException for testing purposes. + * This mock only provides the line and column numbers needed for the test. 
+ */ + private static class MockSearchParseException extends SearchParseException { + private final int lineNumber; + private final int columnNumber; + + public MockSearchParseException(int lineNumber, int columnNumber) throws IOException { + super(null, "Test search parse error", null); + this.lineNumber = lineNumber; + this.columnNumber = columnNumber; + } + + @Override + public int getLineNumber() { + return lineNumber; + } + + @Override + public int getColumnNumber() { + return columnNumber; + } + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchPhaseExecutionExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchPhaseExecutionExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..42dc60f6d2999 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/SearchPhaseExecutionExceptionProtoUtilsTests.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class SearchPhaseExecutionExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testMetadataToProto() { + // Create a SearchPhaseExecutionException with specific values + String phaseName = "query"; + + // Create a ShardSearchFailure + RuntimeException cause = new RuntimeException("Test cause"); + ShardSearchFailure[] shardFailures = new ShardSearchFailure[] { new ShardSearchFailure(cause) }; + + SearchPhaseExecutionException exception = new SearchPhaseExecutionException( + phaseName, + "Test search phase execution error", + shardFailures + ); + + // Convert to Protocol Buffer + Map metadata = SearchPhaseExecutionExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertTrue("Should have phase field", metadata.containsKey("phase")); + assertTrue("Should have grouped field", metadata.containsKey("grouped")); + assertTrue("Should have failed_shards field", metadata.containsKey("failed_shards")); + + // Verify field values + ObjectMap.Value phaseValue = metadata.get("phase"); + ObjectMap.Value groupedValue = metadata.get("grouped"); + ObjectMap.Value failedShardsValue = metadata.get("failed_shards"); + + assertEquals("phase should match", phaseName, phaseValue.getString()); + assertTrue("grouped should be true", groupedValue.getBool()); + + // Verify failed_shards list + ObjectMap.ListValue failedShardsList = failedShardsValue.getListValue(); + assertEquals("failed_shards should have 1 item", 1, failedShardsList.getValueCount()); + + // Note: Since ShardOperationFailedExceptionProtoUtils.toProto() returns an empty Value, + // we can't verify the content of the failed shard item beyond its existence + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ShardOperationFailedExceptionProtoUtilsTests.java 
b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ShardOperationFailedExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..c209a640cacfe --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/ShardOperationFailedExceptionProtoUtilsTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class ShardOperationFailedExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testToProto() { + // Create a mock ShardOperationFailedException + ShardOperationFailedException mockFailure = new MockShardOperationFailedException(); + + // Convert to Protocol Buffer + ObjectMap.Value value = ShardOperationFailedExceptionProtoUtils.toProto(mockFailure); + + // Verify the conversion + // Note: According to the implementation, this method currently returns an empty Value + // This test verifies that the method executes without error and returns a non-null Value + assertNotNull("Should return a non-null Value", value); + + // If the implementation is updated in the future to include actual data, + // this test should be updated to verify the specific fields and values + } + + /** + * A simple mock implementation of ShardOperationFailedException for testing purposes. + */ + private static class MockShardOperationFailedException extends ShardOperationFailedException { + + public MockShardOperationFailedException() { + this.index = "test_index"; + this.shardId = 1; + this.reason = "Test shard failure reason"; + this.status = RestStatus.INTERNAL_SERVER_ERROR; + this.cause = new RuntimeException("Test cause"); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // Not needed for this test + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // Not needed for this test + return builder; + } + + @Override + public String toString() { + return "MockShardOperationFailedException[test_index][1]: Test shard failure reason"; + } + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/TooManyBucketsExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/TooManyBucketsExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..62ae00bfdab08 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/TooManyBucketsExceptionProtoUtilsTests.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions; + +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.search.aggregations.MultiBucketConsumerService; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class TooManyBucketsExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testMetadataToProto() { + // Create a TooManyBucketsException with a specific max buckets value + int maxBuckets = 10000; + MultiBucketConsumerService.TooManyBucketsException exception = new MultiBucketConsumerService.TooManyBucketsException( + "Test too many buckets", + maxBuckets + ); + + // Convert to Protocol Buffer + Map metadata = TooManyBucketsExceptionProtoUtils.metadataToProto(exception); + + // Verify the conversion + assertTrue("Should have max_buckets field", metadata.containsKey("max_buckets")); + + // Verify field value + ObjectMap.Value maxBucketsValue = metadata.get("max_buckets"); + assertEquals("max_buckets should match", maxBuckets, maxBucketsValue.getInt32()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/BulkRequestProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/BulkRequestProtoUtilsTests.java new file mode 100644 index 0000000000000..d3f6bac873d21 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/BulkRequestProtoUtilsTests.java @@ -0,0 +1,157 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.services; + +import com.google.protobuf.ByteString; +import org.opensearch.action.DocWriteRequest; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.plugin.transport.grpc.proto.request.document.bulk.BulkRequestProtoUtils; +import org.opensearch.protobufs.BulkRequest; +import org.opensearch.protobufs.BulkRequestBody; +import org.opensearch.protobufs.CreateOperation; +import org.opensearch.protobufs.DeleteOperation; +import org.opensearch.protobufs.IndexOperation; +import org.opensearch.protobufs.UpdateOperation; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.client.node.NodeClient; +import org.junit.Before; + +import java.io.IOException; + +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class BulkRequestProtoUtilsTests extends OpenSearchTestCase { + + @Mock + private NodeClient client; + + @Before + public void setup() { + MockitoAnnotations.openMocks(this); + } + + public void testPrepareRequestWithIndexOperation() throws IOException { + // Create a Protocol Buffer BulkRequest with an index operation + BulkRequest request = createBulkRequestWithIndexOperation(); + + // Convert to OpenSearch BulkRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); + + // Verify the converted request + assertEquals("Should have 1 request", 1, bulkRequest.numberOfActions()); + // The actual refresh policy is NONE, not IMMEDIATE + assertEquals("Should have the correct refresh policy", WriteRequest.RefreshPolicy.NONE, bulkRequest.getRefreshPolicy()); + + // Verify the index request + DocWriteRequest docWriteRequest = bulkRequest.requests().get(0); + assertEquals("Should be an 
INDEX operation", DocWriteRequest.OpType.INDEX, docWriteRequest.opType()); + assertEquals("Should have the correct index", "test-index", docWriteRequest.index()); + assertEquals("Should have the correct id", "test-id", docWriteRequest.id()); + assertEquals("Should have the correct pipeline", "test-pipeline", ((IndexRequest) docWriteRequest).getPipeline()); + + } + + public void testPrepareRequestWithCreateOperation() throws IOException { + // Create a Protocol Buffer BulkRequest with a create operation + BulkRequest request = createBulkRequestWithCreateOperation(); + + // Convert to OpenSearch BulkRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); + + // Verify the converted request + assertEquals("Should have 1 request", 1, bulkRequest.numberOfActions()); + + // Verify the create request + DocWriteRequest docWriteRequest = bulkRequest.requests().get(0); + assertEquals("Should be a CREATE operation", DocWriteRequest.OpType.CREATE, docWriteRequest.opType()); + assertEquals("Should have the correct index", "test-index", docWriteRequest.index()); + assertEquals("Should have the correct id", "test-id", docWriteRequest.id()); + } + + public void testPrepareRequestWithDeleteOperation() throws IOException { + // Create a Protocol Buffer BulkRequest with a delete operation + BulkRequest request = createBulkRequestWithDeleteOperation(); + + // Convert to OpenSearch BulkRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); + + // Verify the converted request + assertEquals("Should have 1 request", 1, bulkRequest.numberOfActions()); + + // Verify the delete request + DocWriteRequest docWriteRequest = bulkRequest.requests().get(0); + assertEquals("Should have the correct index", "test-index", docWriteRequest.index()); + assertEquals("Should have the correct id", "test-id", docWriteRequest.id()); + } + + public void testPrepareRequestWithUpdateOperation() throws IOException { + // Create a Protocol Buffer BulkRequest with an update operation + BulkRequest request = createBulkRequestWithUpdateOperation(); + + // Convert to OpenSearch BulkRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); + + // Verify the converted request + assertEquals("Should have 1 request", 1, bulkRequest.numberOfActions()); + + // Verify the update request + DocWriteRequest docWriteRequest = bulkRequest.requests().get(0); + assertEquals("Should have the correct index", "test-index", docWriteRequest.index()); + assertEquals("Should have the correct id", "test-id", docWriteRequest.id()); + } + + // Helper methods to create test requests + + private BulkRequest createBulkRequestWithIndexOperation() { + IndexOperation indexOp = IndexOperation.newBuilder().setIndex("test-index").setId("test-id").build(); + + BulkRequestBody requestBody = BulkRequestBody.newBuilder() + .setIndex(indexOp) + .setDoc(ByteString.copyFromUtf8("{\"field\":\"value\"}")) + .build(); + + return BulkRequest.newBuilder() + .setPipeline("test-pipeline") + .setRefreshValue(1) // REFRESH_TRUE = 1 + .addRequestBody(requestBody) + .build(); + } + + private BulkRequest createBulkRequestWithCreateOperation() { + CreateOperation createOp = CreateOperation.newBuilder().setIndex("test-index").setId("test-id").build(); + + BulkRequestBody requestBody = BulkRequestBody.newBuilder() + .setCreate(createOp) + .setDoc(ByteString.copyFromUtf8("{\"field\":\"value\"}")) + .build(); + + return 
BulkRequest.newBuilder().addRequestBody(requestBody).build(); + } + + private BulkRequest createBulkRequestWithDeleteOperation() { + DeleteOperation deleteOp = DeleteOperation.newBuilder().setIndex("test-index").setId("test-id").build(); + + BulkRequestBody requestBody = BulkRequestBody.newBuilder().setDelete(deleteOp).build(); + + return BulkRequest.newBuilder().addRequestBody(requestBody).build(); + } + + private BulkRequest createBulkRequestWithUpdateOperation() { + UpdateOperation updateOp = UpdateOperation.newBuilder().setIndex("test-index").setId("test-id").build(); + + BulkRequestBody requestBody = BulkRequestBody.newBuilder() + .setUpdate(updateOp) + .setDoc(ByteString.copyFromUtf8("{\"field\":\"updated-value\"}")) + .build(); + + return BulkRequest.newBuilder().addRequestBody(requestBody).build(); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/document/DocumentServiceImplTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/document/DocumentServiceImplTests.java new file mode 100644 index 0000000000000..8d4bba91877b6 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/document/DocumentServiceImplTests.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.services.document; + +import com.google.protobuf.ByteString; +import org.opensearch.plugin.transport.grpc.services.DocumentServiceImpl; +import org.opensearch.protobufs.BulkRequest; +import org.opensearch.protobufs.BulkRequestBody; +import org.opensearch.protobufs.IndexOperation; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.client.node.NodeClient; +import org.junit.Before; + +import java.io.IOException; + +import io.grpc.stub.StreamObserver; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.verify; + +public class DocumentServiceImplTests extends OpenSearchTestCase { + + private DocumentServiceImpl service; + + @Mock + private NodeClient client; + + @Mock + private StreamObserver responseObserver; + + @Before + public void setup() throws IOException { + MockitoAnnotations.openMocks(this); + service = new DocumentServiceImpl(client); + } + + public void testBulkSuccess() throws IOException { + // Create a test request + BulkRequest request = createTestBulkRequest(); + + // Call the bulk method + service.bulk(request, responseObserver); + + // Verify that client.bulk was called with any BulkRequest and any ActionListener + verify(client).bulk(any(org.opensearch.action.bulk.BulkRequest.class), any()); + } + + public void testBulkError() throws IOException { + // Create a test request + BulkRequest request = createTestBulkRequest(); + + // Make the client throw an exception when bulk is called + doThrow(new RuntimeException("Test exception")).when(client).bulk(any(org.opensearch.action.bulk.BulkRequest.class), any()); + + // Call the bulk method + service.bulk(request, responseObserver); + + // Verify that the error was sent + verify(responseObserver).onError(any(RuntimeException.class)); + } + + private BulkRequest createTestBulkRequest() { + IndexOperation indexOp = 
IndexOperation.newBuilder().setIndex("test-index").setId("test-id").build(); + + BulkRequestBody requestBody = BulkRequestBody.newBuilder() + .setIndex(indexOp) + .setDoc(ByteString.copyFromUtf8("{\"field\":\"value\"}")) + .build(); + + return BulkRequest.newBuilder().addRequestBody(requestBody).build(); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java deleted file mode 100644 index 8cf44eebb293e..0000000000000 --- a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.transport.grpc; - -import org.opensearch.common.network.NetworkService; -import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchTestCase; -import org.hamcrest.MatcherAssert; -import org.junit.Before; - -import java.util.List; - -import io.grpc.BindableService; - -import static org.hamcrest.Matchers.emptyArray; -import static org.hamcrest.Matchers.not; - -public class Netty4GrpcServerTransportTests extends OpenSearchTestCase { - - private NetworkService networkService; - private List services; - - @Before - public void setup() { - networkService = new NetworkService(List.of()); - services = List.of(); - } - - public void test() { - try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(createSettings(), services, networkService)) { - transport.start(); - - MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); - assertNotNull(transport.boundAddress().publishAddress().address()); - - transport.stop(); - } - } - - private static Settings createSettings() { - return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), getPortRange()).build(); - } -} From e9d8e0007b58f96a89b1a400a14eb2ed9f7aa597 Mon Sep 17 00:00:00 2001 From: Ruirui Zhang Date: Fri, 4 Apr 2025 22:35:53 -0700 Subject: [PATCH 144/550] Add tracking for long running SearchTask post cancellation (#17726) * add tracking for long running SearchTask post cancellation Signed-off-by: Ruirui Zhang * add version checks for searchTask Signed-off-by: Ruirui Zhang --- CHANGELOG.md | 1 + .../BaseSearchTaskCancellationStats.java | 73 +++++++++++++++++++ .../SearchShardTaskCancellationStats.java | 54 +------------- .../tasks/SearchTaskCancellationStats.java | 27 +++++++ .../TaskCancellationMonitoringService.java | 7 +- .../tasks/TaskCancellationStats.java | 27 ++++++- .../SearchTaskCancellationStatsTests.java | 28 +++++++ ...askCancellationMonitoringServiceTests.java | 64 +++++++++++----- .../tasks/TaskCancellationStatsTests.java | 5 +- 9 files changed, 210 insertions(+), 76 deletions(-) create mode 100644 server/src/main/java/org/opensearch/tasks/BaseSearchTaskCancellationStats.java create mode 100644 server/src/main/java/org/opensearch/tasks/SearchTaskCancellationStats.java create mode 100644 server/src/test/java/org/opensearch/tasks/SearchTaskCancellationStatsTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 976017cae8da0..bf5ff556f0227 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 
- Introduce a new search node role to hold search only shards ([#17620](https://github.com/opensearch-project/OpenSearch/pull/17620)) - Fix systemd integTest on deb regarding path ownership check ([#17641](https://github.com/opensearch-project/OpenSearch/pull/17641)) - Add dfs transformation function in XContentMapValues ([#17612](https://github.com/opensearch-project/OpenSearch/pull/17612)) +- Add tracking for long-running SearchTask post cancellation ([#17726](https://github.com/opensearch-project/OpenSearch/pull/17726)) - Added Kinesis support as a plugin for the pull-based ingestion ([#17615](https://github.com/opensearch-project/OpenSearch/pull/17615)) - Add FilterFieldType for developers who want to wrap MappedFieldType ([#17627](https://github.com/opensearch-project/OpenSearch/pull/17627)) - [Rule Based Auto-tagging] Add in-memory rule processing service ([#17365](https://github.com/opensearch-project/OpenSearch/pull/17365)) diff --git a/server/src/main/java/org/opensearch/tasks/BaseSearchTaskCancellationStats.java b/server/src/main/java/org/opensearch/tasks/BaseSearchTaskCancellationStats.java new file mode 100644 index 0000000000000..4d9fc982d3df7 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/BaseSearchTaskCancellationStats.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Base class for search task cancellation statistics. 
+ */ +public abstract class BaseSearchTaskCancellationStats implements ToXContentObject, Writeable { + + private final long currentLongRunningCancelledTaskCount; + private final long totalLongRunningCancelledTaskCount; + + public BaseSearchTaskCancellationStats(long currentTaskCount, long totalTaskCount) { + this.currentLongRunningCancelledTaskCount = currentTaskCount; + this.totalLongRunningCancelledTaskCount = totalTaskCount; + } + + public BaseSearchTaskCancellationStats(StreamInput in) throws IOException { + this.currentLongRunningCancelledTaskCount = in.readVLong(); + this.totalLongRunningCancelledTaskCount = in.readVLong(); + } + + protected long getCurrentLongRunningCancelledTaskCount() { + return this.currentLongRunningCancelledTaskCount; + } + + protected long getTotalLongRunningCancelledTaskCount() { + return this.totalLongRunningCancelledTaskCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("current_count_post_cancel", currentLongRunningCancelledTaskCount); + builder.field("total_count_post_cancel", totalLongRunningCancelledTaskCount); + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(currentLongRunningCancelledTaskCount); + out.writeVLong(totalLongRunningCancelledTaskCount); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + BaseSearchTaskCancellationStats that = (BaseSearchTaskCancellationStats) o; + return currentLongRunningCancelledTaskCount == that.currentLongRunningCancelledTaskCount + && totalLongRunningCancelledTaskCount == that.totalLongRunningCancelledTaskCount; + } + + @Override + public int hashCode() { + return Objects.hash(currentLongRunningCancelledTaskCount, totalLongRunningCancelledTaskCount); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/SearchShardTaskCancellationStats.java b/server/src/main/java/org/opensearch/tasks/SearchShardTaskCancellationStats.java index e6ce092d7516e..cd13acde7f613 100644 --- a/server/src/main/java/org/opensearch/tasks/SearchShardTaskCancellationStats.java +++ b/server/src/main/java/org/opensearch/tasks/SearchShardTaskCancellationStats.java @@ -9,67 +9,19 @@ package org.opensearch.tasks; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Objects; /** * Holds monitoring service stats specific to search shard task. 
*/ -public class SearchShardTaskCancellationStats implements ToXContentObject, Writeable { - - private final long currentLongRunningCancelledTaskCount; - private final long totalLongRunningCancelledTaskCount; +public class SearchShardTaskCancellationStats extends BaseSearchTaskCancellationStats { public SearchShardTaskCancellationStats(long currentTaskCount, long totalTaskCount) { - this.currentLongRunningCancelledTaskCount = currentTaskCount; - this.totalLongRunningCancelledTaskCount = totalTaskCount; + super(currentTaskCount, totalTaskCount); } public SearchShardTaskCancellationStats(StreamInput in) throws IOException { - this.currentLongRunningCancelledTaskCount = in.readVLong(); - this.totalLongRunningCancelledTaskCount = in.readVLong(); - } - - // package private for testing - protected long getCurrentLongRunningCancelledTaskCount() { - return this.currentLongRunningCancelledTaskCount; - } - - // package private for testing - protected long getTotalLongRunningCancelledTaskCount() { - return this.totalLongRunningCancelledTaskCount; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field("current_count_post_cancel", currentLongRunningCancelledTaskCount); - builder.field("total_count_post_cancel", totalLongRunningCancelledTaskCount); - return builder.endObject(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(currentLongRunningCancelledTaskCount); - out.writeVLong(totalLongRunningCancelledTaskCount); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SearchShardTaskCancellationStats that = (SearchShardTaskCancellationStats) o; - return currentLongRunningCancelledTaskCount == that.currentLongRunningCancelledTaskCount - && totalLongRunningCancelledTaskCount == that.totalLongRunningCancelledTaskCount; - } - - @Override - public int hashCode() { - return Objects.hash(currentLongRunningCancelledTaskCount, totalLongRunningCancelledTaskCount); + super(in); } } diff --git a/server/src/main/java/org/opensearch/tasks/SearchTaskCancellationStats.java b/server/src/main/java/org/opensearch/tasks/SearchTaskCancellationStats.java new file mode 100644 index 0000000000000..9ae9a65877bb7 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/SearchTaskCancellationStats.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.opensearch.core.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Holds monitoring service stats specific to search task. 
+ */ +public class SearchTaskCancellationStats extends BaseSearchTaskCancellationStats { + + public SearchTaskCancellationStats(long currentTaskCount, long totalTaskCount) { + super(currentTaskCount, totalTaskCount); + } + + public SearchTaskCancellationStats(StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellationMonitoringService.java b/server/src/main/java/org/opensearch/tasks/TaskCancellationMonitoringService.java index 2040703d88c38..65ac4d3a0db6e 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellationMonitoringService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellationMonitoringService.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.search.SearchShardTask; +import org.opensearch.action.search.SearchTask; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.threadpool.Scheduler; @@ -32,7 +33,7 @@ public class TaskCancellationMonitoringService extends AbstractLifecycleComponent implements TaskManager.TaskEventListeners { private static final Logger logger = LogManager.getLogger(TaskCancellationMonitoringService.class); - private final static List> TASKS_TO_TRACK = Arrays.asList(SearchShardTask.class); + private final static List> TASKS_TO_TRACK = Arrays.asList(SearchShardTask.class, SearchTask.class); private volatile Scheduler.Cancellable scheduledFuture; private final ThreadPool threadPool; @@ -146,6 +147,10 @@ public TaskCancellationStats stats() { Map, List> currentRunningCancelledTasks = getCurrentRunningTasksPostCancellation(); return new TaskCancellationStats( + new SearchTaskCancellationStats( + Optional.of(currentRunningCancelledTasks).map(mapper -> mapper.get(SearchTask.class)).map(List::size).orElse(0), + cancellationStatsHolder.get(SearchTask.class).totalLongRunningCancelledTaskCount.count() + ), new SearchShardTaskCancellationStats( Optional.of(currentRunningCancelledTasks).map(mapper -> mapper.get(SearchShardTask.class)).map(List::size).orElse(0), cancellationStatsHolder.get(SearchShardTask.class).totalLongRunningCancelledTaskCount.count() diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellationStats.java b/server/src/main/java/org/opensearch/tasks/TaskCancellationStats.java index cca898fdd844f..49f4c8eb8e02e 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellationStats.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellationStats.java @@ -8,6 +8,7 @@ package org.opensearch.tasks; +import org.opensearch.Version; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -22,13 +23,23 @@ */ public class TaskCancellationStats implements ToXContentFragment, Writeable { + private final SearchTaskCancellationStats searchTaskCancellationStats; private final SearchShardTaskCancellationStats searchShardTaskCancellationStats; - public TaskCancellationStats(SearchShardTaskCancellationStats searchShardTaskCancellationStats) { + public TaskCancellationStats( + SearchTaskCancellationStats searchTaskCancellationStats, + SearchShardTaskCancellationStats searchShardTaskCancellationStats + ) { + this.searchTaskCancellationStats = searchTaskCancellationStats; this.searchShardTaskCancellationStats = searchShardTaskCancellationStats; } public 
TaskCancellationStats(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + searchTaskCancellationStats = new SearchTaskCancellationStats(in); + } else { + searchTaskCancellationStats = new SearchTaskCancellationStats(0, 0); + } searchShardTaskCancellationStats = new SearchShardTaskCancellationStats(in); } @@ -37,15 +48,24 @@ protected SearchShardTaskCancellationStats getSearchShardTaskCancellationStats() return this.searchShardTaskCancellationStats; } + // package private for testing + protected SearchTaskCancellationStats getSearchTaskCancellationStats() { + return this.searchTaskCancellationStats; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("task_cancellation"); + builder.field("search_task", searchTaskCancellationStats); builder.field("search_shard_task", searchShardTaskCancellationStats); return builder.endObject(); } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + searchTaskCancellationStats.writeTo(out); + } searchShardTaskCancellationStats.writeTo(out); } @@ -54,11 +74,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TaskCancellationStats that = (TaskCancellationStats) o; - return Objects.equals(searchShardTaskCancellationStats, that.searchShardTaskCancellationStats); + return Objects.equals(searchTaskCancellationStats, that.searchTaskCancellationStats) + && Objects.equals(searchShardTaskCancellationStats, that.searchShardTaskCancellationStats); } @Override public int hashCode() { - return Objects.hash(searchShardTaskCancellationStats); + return Objects.hash(searchTaskCancellationStats, searchShardTaskCancellationStats); } } diff --git a/server/src/test/java/org/opensearch/tasks/SearchTaskCancellationStatsTests.java b/server/src/test/java/org/opensearch/tasks/SearchTaskCancellationStatsTests.java new file mode 100644 index 0000000000000..e4533582ca7f3 --- /dev/null +++ b/server/src/test/java/org/opensearch/tasks/SearchTaskCancellationStatsTests.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks; + +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; + +public class SearchTaskCancellationStatsTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return SearchTaskCancellationStats::new; + } + + @Override + protected SearchTaskCancellationStats createTestInstance() { + return randomInstance(); + } + + public static SearchTaskCancellationStats randomInstance() { + return new SearchTaskCancellationStats(randomNonNegativeLong(), randomNonNegativeLong()); + } +} diff --git a/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java b/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java index bb154b95f9f01..a10d5bd285c8b 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java @@ -10,6 +10,7 @@ import org.opensearch.Version; import org.opensearch.action.search.SearchShardTask; +import org.opensearch.action.search.SearchTask; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamOutput; @@ -31,6 +32,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static org.opensearch.tasks.TaskCancellationMonitoringSettings.DURATION_MILLIS_SETTING; import static org.mockito.ArgumentMatchers.any; @@ -79,6 +81,17 @@ public void testWithNoCurrentRunningCancelledTasks() { } public void testWithNonZeroCancelledSearchShardTasksRunning() throws InterruptedException { + testWithNonZeroCancelledTasksRunning(SearchShardTask.class, TaskCancellationStats::getSearchShardTaskCancellationStats); + } + + public void testWithNonZeroCancelledSearchTasksRunning() throws InterruptedException { + testWithNonZeroCancelledTasksRunning(SearchTask.class, TaskCancellationStats::getSearchTaskCancellationStats); + } + + private void testWithNonZeroCancelledTasksRunning( + Class taskType, + Function statsExtractor + ) throws InterruptedException { Settings settings = Settings.builder() .put(DURATION_MILLIS_SETTING.getKey(), 0) // Setting to zero for testing .build(); @@ -93,7 +106,7 @@ public void testWithNonZeroCancelledSearchShardTasksRunning() throws Interrupted taskCancellationMonitoringSettings ); int numTasks = randomIntBetween(5, 50); - List tasks = createTasks(numTasks); + List tasks = createTasks(taskType, numTasks); int cancelFromIdx = randomIntBetween(0, numTasks - 1); int cancelTillIdx = randomIntBetween(cancelFromIdx, numTasks - 1); @@ -105,19 +118,19 @@ public void testWithNonZeroCancelledSearchShardTasksRunning() throws Interrupted taskCancellationMonitoringService.doRun(); // 1st run to verify whether we are able to track running cancelled // tasks. 
TaskCancellationStats stats = taskCancellationMonitoringService.stats(); - assertEquals(numberOfTasksCancelled, stats.getSearchShardTaskCancellationStats().getCurrentLongRunningCancelledTaskCount()); - assertEquals(numberOfTasksCancelled, stats.getSearchShardTaskCancellationStats().getTotalLongRunningCancelledTaskCount()); + assertEquals(numberOfTasksCancelled, statsExtractor.apply(stats).getCurrentLongRunningCancelledTaskCount()); + assertEquals(numberOfTasksCancelled, statsExtractor.apply(stats).getTotalLongRunningCancelledTaskCount()); taskCancellationMonitoringService.doRun(); // 2nd run. Verify same. stats = taskCancellationMonitoringService.stats(); - assertEquals(numberOfTasksCancelled, stats.getSearchShardTaskCancellationStats().getCurrentLongRunningCancelledTaskCount()); - assertEquals(numberOfTasksCancelled, stats.getSearchShardTaskCancellationStats().getTotalLongRunningCancelledTaskCount()); + assertEquals(numberOfTasksCancelled, statsExtractor.apply(stats).getCurrentLongRunningCancelledTaskCount()); + assertEquals(numberOfTasksCancelled, statsExtractor.apply(stats).getTotalLongRunningCancelledTaskCount()); completeTasksConcurrently(tasks, 0, tasks.size() - 1).await(); taskCancellationMonitoringService.doRun(); // 3rd run to verify current count is 0 and total remains the same. stats = taskCancellationMonitoringService.stats(); assertTrue(taskCancellationMonitoringService.getCancelledTaskTracker().isEmpty()); - assertEquals(0, stats.getSearchShardTaskCancellationStats().getCurrentLongRunningCancelledTaskCount()); - assertEquals(numberOfTasksCancelled, stats.getSearchShardTaskCancellationStats().getTotalLongRunningCancelledTaskCount()); + assertEquals(0, statsExtractor.apply(stats).getCurrentLongRunningCancelledTaskCount()); + assertEquals(numberOfTasksCancelled, statsExtractor.apply(stats).getTotalLongRunningCancelledTaskCount()); } public void testShouldRunGetsDisabledAfterTaskCompletion() throws InterruptedException { @@ -138,7 +151,7 @@ public void testShouldRunGetsDisabledAfterTaskCompletion() throws InterruptedExc // Start few tasks. int numTasks = randomIntBetween(5, 50); - List tasks = createTasks(numTasks); + List tasks = createTasks(SearchShardTask.class, numTasks); taskCancellationMonitoringService.doRun(); TaskCancellationStats stats = taskCancellationMonitoringService.stats(); @@ -176,7 +189,7 @@ public void testWithVaryingCancelledTasksDuration() throws InterruptedException ); int numTasks = randomIntBetween(5, 50); - List tasks = createTasks(numTasks); + List tasks = createTasks(SearchShardTask.class, numTasks); int numTasksToBeCancelledInFirstIteration = randomIntBetween(1, numTasks - 1); CountDownLatch countDownLatch = cancelTasksConcurrently(tasks, 0, numTasksToBeCancelledInFirstIteration - 1); @@ -232,10 +245,13 @@ public void testTasksAreGettingEvictedCorrectlyAfterCompletion() throws Interrup ); // Start few tasks. 
- int numTasks = randomIntBetween(5, 50); - List tasks = createTasks(numTasks); + int searchShardTaskNum = randomIntBetween(5, 50); + int searchTaskNum = randomIntBetween(5, 50); + int taskNum = searchShardTaskNum + searchTaskNum; + List tasks = new ArrayList<>(createTasks(SearchShardTask.class, searchShardTaskNum)); + tasks.addAll(createTasks(SearchTask.class, searchTaskNum)); assertTrue(taskCancellationMonitoringService.getCancelledTaskTracker().isEmpty()); - int numTasksToBeCancelledInFirstIteration = randomIntBetween(2, numTasks - 1); + int numTasksToBeCancelledInFirstIteration = randomIntBetween(2, taskNum - 1); CountDownLatch countDownLatch = cancelTasksConcurrently(tasks, 0, numTasksToBeCancelledInFirstIteration - 1); countDownLatch.await(); // Wait for all tasks to be cancelled in first iteration @@ -245,7 +261,7 @@ public void testTasksAreGettingEvictedCorrectlyAfterCompletion() throws Interrup assertTrue(taskCancellationMonitoringService.getCancelledTaskTracker().containsKey(tasks.get(itr).getId())); } // Cancel rest of the tasks - cancelTasksConcurrently(tasks, numTasksToBeCancelledInFirstIteration, numTasks - 1).await(); + cancelTasksConcurrently(tasks, numTasksToBeCancelledInFirstIteration, taskNum - 1).await(); for (int itr = 0; itr < tasks.size(); itr++) { assertTrue(taskCancellationMonitoringService.getCancelledTaskTracker().containsKey(tasks.get(itr).getId())); } @@ -294,15 +310,16 @@ public void testDoStartAndStop() { verify(scheduleFuture, times(1)).cancel(); } - private List createTasks(int numTasks) { - List tasks = new ArrayList<>(numTasks); + @SuppressWarnings("unchecked") + private List createTasks(Class taskType, int numTasks) { + List tasks = new ArrayList<>(numTasks); for (int i = 0; i < numTasks; i++) { - tasks.add((SearchShardTask) taskManager.register("type-" + i, "action-" + i, new MockQuerySearchRequest())); + tasks.add((T) taskManager.register("type-" + i, "action-" + i, new MockQuerySearchRequest(taskType))); } return tasks; } - // Caller can this with the list of tasks specifically mentioning which ones to cancel. And can call CountDownLatch + // Caller can call this method with a list of tasks specifically mentioning which ones to cancel. And can call CountDownLatch // .await() to wait for all tasks be cancelled. 
private CountDownLatch cancelTasksConcurrently(List tasks, int cancelFromIdx, int cancelTillIdx) { assert cancelFromIdx >= 0; @@ -347,10 +364,12 @@ private CountDownLatch completeTasksConcurrently(List } public static class MockQuerySearchRequest extends TransportRequest { + private Class taskType; protected String requestName; - public MockQuerySearchRequest() { + public MockQuerySearchRequest(Class taskType) { super(); + this.taskType = taskType; } @Override @@ -366,8 +385,13 @@ public String getDescription() { @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers); + if (taskType == SearchTask.class) { + return new SearchTask(id, type, action, this::getDescription, parentTaskId, headers); + } else if (taskType == SearchShardTask.class) { + return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers); + } else { + throw new IllegalArgumentException("Unsupported task type: " + taskType); + } } } - } diff --git a/server/src/test/java/org/opensearch/tasks/TaskCancellationStatsTests.java b/server/src/test/java/org/opensearch/tasks/TaskCancellationStatsTests.java index beef05b04ab7a..7c3dbe900cb1f 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskCancellationStatsTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskCancellationStatsTests.java @@ -23,6 +23,9 @@ protected TaskCancellationStats createTestInstance() { } public static TaskCancellationStats randomInstance() { - return new TaskCancellationStats(SearchShardTaskCancellationStatsTests.randomInstance()); + return new TaskCancellationStats( + SearchTaskCancellationStatsTests.randomInstance(), + SearchShardTaskCancellationStatsTests.randomInstance() + ); } } From 2fd3882af7d00eca3ef53553418946a0f97eeebd Mon Sep 17 00:00:00 2001 From: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Date: Mon, 7 Apr 2025 01:37:03 +0530 Subject: [PATCH 145/550] Add File Interceptor and Integ Tests (#17760) * Implement a java agent based file interceptor Signed-off-by: Gulshan * Add FileInterceptor IntegTests Signed-off-by: Rajat Gupta * Reuse intercepted methods in Agent from FileInterceptor Signed-off-by: Rajat Gupta * Replace static final class members with interceptor parameters Signed-off-by: Andriy Redko * Replace interceptor parameters with inplace checks Signed-off-by: Andriy Redko * Set INTERCEPTED_METHODS to private Signed-off-by: Rajat Gupta --------- Signed-off-by: Gulshan Signed-off-by: Rajat Gupta Signed-off-by: Andriy Redko Co-authored-by: Gulshan Co-authored-by: Rajat Gupta Co-authored-by: Andriy Redko --- CHANGELOG.md | 1 + libs/agent-sm/agent/build.gradle | 1 + .../java/org/opensearch/javaagent/Agent.java | 30 ++- .../opensearch/javaagent/FileInterceptor.java | 89 +++++++ .../javaagent/FileInterceptorIntegTests.java | 245 ++++++++++++++++++ 5 files changed, 364 insertions(+), 2 deletions(-) create mode 100644 libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java create mode 100644 libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/FileInterceptorIntegTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index bf5ff556f0227..8921ccfdc38f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Security Manager Replacement] Create initial Java Agent to intercept Socket::connect calls 
([#17724](https://github.com/opensearch-project/OpenSearch/pull/17724)) - Add ingestion management APIs for pause, resume and get ingestion state ([#17631](https://github.com/opensearch-project/OpenSearch/pull/17631)) - [Security Manager Replacement] Enhance Java Agent to intercept System::exit ([#17746](https://github.com/opensearch-project/OpenSearch/pull/17746)) +- [Security Manager Replacement] Implement File Interceptor and add integration tests ([#17760](https://github.com/opensearch-project/OpenSearch/pull/17760)) - [Security Manager Replacement] Enhance Java Agent to intercept Runtime::halt ([#17757](https://github.com/opensearch-project/OpenSearch/pull/17757)) - Support AutoExpand for SearchReplica ([#17741](https://github.com/opensearch-project/OpenSearch/pull/17741)) - Implement fixed interval refresh task scheduling ([#17777](https://github.com/opensearch-project/OpenSearch/pull/17777)) diff --git a/libs/agent-sm/agent/build.gradle b/libs/agent-sm/agent/build.gradle index a69dc057f2f9c..fb2c71af42d74 100644 --- a/libs/agent-sm/agent/build.gradle +++ b/libs/agent-sm/agent/build.gradle @@ -43,6 +43,7 @@ tasks.named('forbiddenApisTest').configure { onlyIf { false } } tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' + onlyIf { false } } task prepareAgent(type: Copy) { diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java index 4eb7baa93ab7e..1226943cb6fed 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java @@ -11,7 +11,9 @@ import org.opensearch.javaagent.bootstrap.AgentPolicy; import java.lang.instrument.Instrumentation; +import java.nio.channels.FileChannel; import java.nio.channels.SocketChannel; +import java.nio.file.Files; import java.util.Map; import net.bytebuddy.ByteBuddy; @@ -33,6 +35,22 @@ public class Agent { */ private Agent() {} + /** + * List of methods that are intercepted + */ + private static final String[] INTERCEPTED_METHODS = { + "write", + "createFile", + "createDirectories", + "createLink", + "copy", + "move", + "newByteChannel", + "delete", + "deleteIfExists", + "read", + "open" }; + /** * Premain * @param agentArguments agent arguments @@ -55,12 +73,18 @@ public static void agentmain(String agentArguments, Instrumentation instrumentat private static AgentBuilder createAgentBuilder(Instrumentation inst) throws Exception { final Junction systemType = ElementMatchers.isSubTypeOf(SocketChannel.class); + final Junction pathType = ElementMatchers.isSubTypeOf(Files.class); + final Junction fileChannelType = ElementMatchers.isSubTypeOf(FileChannel.class); - final AgentBuilder.Transformer transformer = (b, typeDescription, classLoader, module, pd) -> b.visit( + final AgentBuilder.Transformer socketTransformer = (b, typeDescription, classLoader, module, pd) -> b.visit( Advice.to(SocketChannelInterceptor.class) .on(ElementMatchers.named("connect").and(ElementMatchers.not(ElementMatchers.isAbstract()))) ); + final AgentBuilder.Transformer fileTransformer = (b, typeDescription, classLoader, module, pd) -> b.visit( + Advice.to(FileInterceptor.class).on(ElementMatchers.namedOneOf(INTERCEPTED_METHODS).or(ElementMatchers.isAbstract())) + ); + ClassInjector.UsingUnsafe.ofBootLoader() .inject( Map.of( @@ -79,7 +103,9 @@ private static AgentBuilder createAgentBuilder(Instrumentation inst) throws Exce 
.with(AgentBuilder.TypeStrategy.Default.REDEFINE) .ignore(ElementMatchers.none()) .type(systemType) - .transform(transformer) + .transform(socketTransformer) + .type(pathType.or(fileChannelType)) + .transform(fileTransformer) .type(ElementMatchers.is(java.lang.System.class)) .transform( (b, typeDescription, classLoader, module, pd) -> b.visit( diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java new file mode 100644 index 0000000000000..605aa5a7d31df --- /dev/null +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.javaagent; + +import org.opensearch.javaagent.bootstrap.AgentPolicy; + +import java.io.FilePermission; +import java.lang.reflect.Method; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.util.Collection; + +import net.bytebuddy.asm.Advice; + +/** + * FileInterceptor + */ +public class FileInterceptor { + /** + * FileInterceptor + */ + public FileInterceptor() {} + + /** + * Intercepts file operations + * + * @param args arguments + * @param method method + * @throws Exception exceptions + */ + @Advice.OnMethodEnter + @SuppressWarnings({ "removal", "deprecation" }) + public static void intercept(@Advice.AllArguments Object[] args, @Advice.Origin Method method) throws Exception { + final Policy policy = AgentPolicy.getPolicy(); + if (policy == null) { + return; /* noop */ + } + + String filePath = null; + if (args.length > 0 && args[0] instanceof String pathStr) { + filePath = Paths.get(pathStr).toAbsolutePath().toString(); + } else if (args.length > 0 && args[0] instanceof Path path) { + filePath = path.toAbsolutePath().toString(); + } + + if (filePath == null) { + return; // No valid file path found + } + + final StackWalker walker = StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE); + final Collection callers = walker.walk(StackCallerProtectionDomainChainExtractor.INSTANCE); + + final String name = method.getName(); + final boolean isMutating = name.equals("copy") + || name.equals("move") + || name.equals("write") + || name.equals("newByteChannel") + || name.startsWith("create"); + final boolean isDelete = isMutating == false ? 
name.startsWith("delete") : false; + + // Check each permission separately + for (final ProtectionDomain domain : callers) { + // Handle FileChannel.open() separately to check read/write permissions properly + if (method.getName().equals("open")) { + if (!policy.implies(domain, new FilePermission(filePath, "read,write"))) { + throw new SecurityException("Denied OPEN access to file: " + filePath + ", domain: " + domain); + } + } + + // File mutating operations + if (isMutating && !policy.implies(domain, new FilePermission(filePath, "write"))) { + throw new SecurityException("Denied WRITE access to file: " + filePath + ", domain: " + domain); + } + + // File deletion operations + if (isDelete && !policy.implies(domain, new FilePermission(filePath, "delete"))) { + throw new SecurityException("Denied DELETE access to file: " + filePath + ", domain: " + domain); + } + } + } +} diff --git a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/FileInterceptorIntegTests.java b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/FileInterceptorIntegTests.java new file mode 100644 index 0000000000000..6375e1d3c2b2d --- /dev/null +++ b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/FileInterceptorIntegTests.java @@ -0,0 +1,245 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.javaagent; + +import org.opensearch.javaagent.bootstrap.AgentPolicy; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FilePermission; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.security.PermissionCollection; +import java.security.Permissions; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.util.UUID; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +@SuppressWarnings("removal") +public class FileInterceptorIntegTests { + + private static Path getTestDir() { + Path baseDir = Path.of(System.getProperty("user.dir")); + Path integTestFiles = baseDir.resolve("integ-test-files").normalize(); + return integTestFiles; + } + + private String randomAlphaOfLength(int length) { + // Using UUID to generate random string and taking first 'length' characters + return UUID.randomUUID().toString().replaceAll("-", "").substring(0, length); + } + + @BeforeClass + public static void setUp() throws Exception { + Policy policy = new Policy() { + @Override + public PermissionCollection getPermissions(ProtectionDomain domain) { + Permissions permissions = new Permissions(); + permissions.add(new FilePermission(System.getProperty("user.dir") + "/-", "read,write,delete")); + return permissions; + } + }; + AgentPolicy.setPolicy(policy); + Files.createDirectories(getTestDir()); + } + + @Test + public void testFileInputStream() throws Exception { + Path tmpDir = getTestDir(); + assertTrue("Tmp directory should exist", Files.exists(tmpDir)); + assertTrue("Tmp directory should be writable", Files.isWritable(tmpDir)); + + // Create a unique file name + String fileName = "test-" + randomAlphaOfLength(8) + ".txt"; + Path tempPath = tmpDir.resolve(fileName); + + // Ensure the 
file doesn't exist + Files.deleteIfExists(tempPath); + + // Write content + String content = "test content"; + Files.write(tempPath, content.getBytes(StandardCharsets.UTF_8)); + + // Verify file creation + assertTrue("File should exist", Files.exists(tempPath)); + assertTrue("File should be readable", Files.isReadable(tempPath)); + assertEquals("File should have correct content", content, Files.readString(tempPath, StandardCharsets.UTF_8)); + + File tempFile = tempPath.toFile(); + + try { + try (FileInputStream fis = new FileInputStream(tempFile)) { + byte[] buffer = new byte[100]; + int bytesRead = fis.read(buffer); + String readContent = new String(buffer, 0, bytesRead); + assertEquals("test content", readContent.trim()); + } + } finally { + // Clean up + Files.deleteIfExists(tempPath); + } + } + + @Test + public void testOpenForReadAndWrite() throws Exception { + Path tmpDir = getTestDir(); + Path tempPath = tmpDir.resolve("test-open-rw-" + randomAlphaOfLength(8) + ".txt"); + + try { + // Test open for read and write + try ( + FileChannel channel = FileChannel.open( + tempPath, + StandardOpenOption.CREATE, + StandardOpenOption.READ, + StandardOpenOption.WRITE + ) + ) { + + // Write content + String content = "test content"; + ByteBuffer writeBuffer = ByteBuffer.wrap(content.getBytes(StandardCharsets.UTF_8)); + channel.write(writeBuffer); + + // Read content back + channel.position(0); // Reset position to start + ByteBuffer readBuffer = ByteBuffer.allocate(100); + channel.read(readBuffer); + readBuffer.flip(); + + String readContent = StandardCharsets.UTF_8.decode(readBuffer).toString(); + assertEquals("Content should match", content, readContent); + } + } finally { + Files.deleteIfExists(tempPath); + } + } + + @Test + public void testCopy() throws Exception { + Path tmpDir = getTestDir(); + Path sourcePath = tmpDir.resolve("test-source-" + randomAlphaOfLength(8) + ".txt"); + Path targetPath = tmpDir.resolve("test-target-" + randomAlphaOfLength(8) + ".txt"); + + try { + // Create source file + String content = "test content"; + Files.write(sourcePath, content.getBytes(StandardCharsets.UTF_8)); + + // Test copy operation + Files.copy(sourcePath, targetPath); + + // Verify copy + assertTrue("Target file should exist", Files.exists(targetPath)); + assertEquals("Content should match", Files.readString(sourcePath), Files.readString(targetPath)); + } finally { + Files.deleteIfExists(sourcePath); + Files.deleteIfExists(targetPath); + } + } + + @Test + public void testCreateFile() throws Exception { + Path tmpDir = getTestDir(); + Path tempPath = tmpDir.resolve("test-create-" + randomAlphaOfLength(8) + ".txt"); + + try { + // Test createFile operation + Files.createFile(tempPath); + + // Verify file creation + assertTrue("File should exist", Files.exists(tempPath)); + assertTrue("Should be a regular file", Files.isRegularFile(tempPath)); + } finally { + Files.deleteIfExists(tempPath); + } + } + + @Test + public void testMove() throws Exception { + Path tmpDir = getTestDir(); + Path sourcePath = tmpDir.resolve("test-source-" + randomAlphaOfLength(8) + ".txt"); + Path targetPath = tmpDir.resolve("test-target-" + randomAlphaOfLength(8) + ".txt"); + + try { + // Create source file + String content = "test content"; + Files.write(sourcePath, content.getBytes(StandardCharsets.UTF_8)); + + // Test move operation + Files.move(sourcePath, targetPath); + + // Verify move + assertFalse("Source file should not exist", Files.exists(sourcePath)); + assertTrue("Target file should exist", 
Files.exists(targetPath)); + assertEquals("Content should match", content, Files.readString(targetPath)); + } finally { + Files.deleteIfExists(sourcePath); + Files.deleteIfExists(targetPath); + } + } + + @Test + public void testCreateLink() throws Exception { + Path tmpDir = getTestDir(); + Path originalPath = tmpDir.resolve("test-original-" + randomAlphaOfLength(8) + ".txt"); + Path linkPath = tmpDir.resolve("test-link-" + randomAlphaOfLength(8) + ".txt"); + + try { + // Create source file + Files.write(originalPath, "test content".getBytes(StandardCharsets.UTF_8)); + + // Test createLink operation + Files.createLink(linkPath, originalPath); + + // Verify link creation + assertTrue("Link should exist", Files.exists(linkPath)); + assertEquals("File contents should be same", Files.readString(originalPath), Files.readString(linkPath)); + } finally { + Files.deleteIfExists(linkPath); + Files.deleteIfExists(originalPath); + } + } + + @Test + public void testDelete() throws Exception { + Path tmpDir = getTestDir(); + Path tempPath = tmpDir.resolve("test-delete-" + randomAlphaOfLength(8) + ".txt"); + + try { + // Create a file with some content + String content = "test content"; + Files.write(tempPath, content.getBytes(StandardCharsets.UTF_8)); + + // Verify file exists before deletion + assertTrue("File should exist before deletion", Files.exists(tempPath)); + assertEquals("File should have correct content", content, Files.readString(tempPath, StandardCharsets.UTF_8)); + + // Test delete operation - FileInterceptor should intercept this + Files.delete(tempPath); + + // Verify deletion + assertFalse("File should not exist after deletion", Files.exists(tempPath)); + + } finally { + // Cleanup in case test fails + Files.deleteIfExists(tempPath); + } + } +} From cd266f35ef5f6cd690269fb92e10d768327bc7fd Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Sun, 6 Apr 2025 17:50:57 -0700 Subject: [PATCH 146/550] Search only replicas (scale to zero) with Reader/Writer Separation (#17299) --- CHANGELOG.md | 1 + .../scale/searchonly/ScaleIndexIT.java | 366 ++++++++ .../replication/SearchReplicaRestoreIT.java | 22 + .../org/opensearch/action/ActionModule.java | 6 +- .../scale/searchonly/ScaleIndexAction.java | 47 ++ .../ScaleIndexClusterStateBuilder.java | 273 ++++++ .../searchonly/ScaleIndexNodeRequest.java | 87 ++ .../searchonly/ScaleIndexNodeResponse.java | 89 ++ .../ScaleIndexOperationValidator.java | 88 ++ .../scale/searchonly/ScaleIndexRequest.java | 166 ++++ .../searchonly/ScaleIndexRequestBuilder.java | 38 + .../scale/searchonly/ScaleIndexResponse.java | 136 +++ .../searchonly/ScaleIndexShardResponse.java | 109 +++ .../ScaleIndexShardSyncManager.java | 217 +++++ .../searchonly/TransportScaleIndexAction.java | 481 +++++++++++ .../scale/searchonly/package-info.java | 10 + .../TransportIndicesShardStoresAction.java | 2 +- .../cluster/block/ClusterBlockLevel.java | 1 + .../cluster/block/ClusterBlocks.java | 3 + .../cluster/health/ClusterIndexHealth.java | 32 +- .../cluster/health/ClusterShardHealth.java | 85 +- .../cluster/health/ClusterStateHealth.java | 81 +- .../cluster/metadata/IndexMetadata.java | 23 +- .../cluster/routing/IndexRoutingTable.java | 60 +- .../cluster/routing/RoutingNodes.java | 22 +- ...caAfterPrimaryActiveAllocationDecider.java | 12 +- .../common/settings/IndexScopedSettings.java | 1 + .../gateway/ReplicaShardAllocator.java | 56 +- .../recovery/RemoteStoreRestoreService.java | 25 +- .../RemoteStoreReplicationSource.java | 31 +- .../replication/SegmentReplicator.java | 2 +- 
.../admin/indices/RestScaleIndexAction.java | 88 ++ .../transport/client/IndicesAdminClient.java | 10 + .../client/support/AbstractClient.java | 5 + .../ScaleIndexClusterStateBuilderTests.java | 169 ++++ .../ScaleIndexNodeRequestTests.java | 100 +++ .../ScaleIndexOperationValidatorTests.java | 159 ++++ .../searchonly/ScaleIndexRequestTests.java | 56 ++ .../searchonly/ScaleIndexResponseTests.java | 234 +++++ .../ScaleIndexShardResponseTests.java | 104 +++ .../ScaleIndexShardSyncManagerTests.java | 340 ++++++++ .../TransportScaleIndexActionTests.java | 798 ++++++++++++++++++ .../health/ClusterShardHealthTests.java | 67 +- .../RemoteStoreReplicationSourceTests.java | 39 +- .../indices/RestScaleIndexActionTests.java | 161 ++++ 45 files changed, 4826 insertions(+), 76 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexClusterStateBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexOperationValidator.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardSyncManager.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/TransportScaleIndexAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/package-info.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/indices/RestScaleIndexAction.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexClusterStateBuilderTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeRequestTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexOperationValidatorTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequestTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexResponseTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardResponseTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardSyncManagerTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/TransportScaleIndexActionTests.java create mode 100644 
server/src/test/java/org/opensearch/rest/action/admin/indices/RestScaleIndexActionTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 8921ccfdc38f1..08f839aa55dbb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Support AutoExpand for SearchReplica ([#17741](https://github.com/opensearch-project/OpenSearch/pull/17741)) - Implement fixed interval refresh task scheduling ([#17777](https://github.com/opensearch-project/OpenSearch/pull/17777)) - Add GRPC DocumentService and Bulk endpoint ([#17727](https://github.com/opensearch-project/OpenSearch/pull/17727)) +- Added scale to zero (`search_only` mode) support for OpenSearch reader writer separation ([#17299](https://github.com/opensearch-project/OpenSearch/pull/17299) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java new file mode 100644 index 0000000000000..3e22084db96d8 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java @@ -0,0 +1,366 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class ScaleIndexIT extends RemoteStoreBaseIntegTestCase { + + private static final String TEST_INDEX = "test_scale_index"; + + @Override + protected Settings featureFlagSettings() { + return 
Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); + } + + public Settings indexSettings() { + return Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + } + + /** + * Tests the full lifecycle of scaling an index down to search-only mode, + * scaling search replicas while in search-only mode, verifying cluster health in + * various states, and then scaling back up to normal mode. + */ + public void testFullSearchOnlyReplicasFullLifecycle() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + internalCluster().startSearchOnlyNodes(3); + + Settings specificSettings = Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + + createIndex(TEST_INDEX, specificSettings); + ensureGreen(TEST_INDEX); + + for (int i = 0; i < 10; i++) { + IndexResponse indexResponse = client().prepareIndex(TEST_INDEX) + .setId(Integer.toString(i)) + .setSource("field1", "value" + i) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + assertEquals(RestStatus.CREATED, indexResponse.status()); + } + + assertBusy(() -> { + SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).get(); + assertHitCount(searchResponse, 10); + assertSearchNodeDocCounts(10, TEST_INDEX); + }, 30, TimeUnit.SECONDS); + + ensureGreen(TEST_INDEX); + + // Scale down to search-only mode + assertAcked(client().admin().indices().prepareScaleSearchOnly(TEST_INDEX, true).get()); + + // Verify search-only setting is enabled + GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings(TEST_INDEX).get(); + assertTrue(settingsResponse.getSetting(TEST_INDEX, IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey()).equals("true")); + + // Verify that write operations are blocked during scale-down + assertBusy(() -> { + try { + // Attempt to index a document while scale-down is in progress + client().prepareIndex(TEST_INDEX) + .setId("sample-write-after-search-only-block") + .setSource("field1", "value1") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + fail("Write operation should be blocked during scale-down"); + } catch (Exception e) { + assertTrue( + "Exception should indicate index scaled down", + e.getMessage().contains("blocked by: [FORBIDDEN/20/index scaled down]") + ); + } + }, 10, TimeUnit.SECONDS); + + ensureGreen(TEST_INDEX); + + // Verify search still works on all search nodes + assertSearchNodeDocCounts(10, TEST_INDEX); + + // Scale up search replicas while in search-only mode + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(TEST_INDEX) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 3).build()) + .get() + ); + + ensureGreen(TEST_INDEX); + + // Verify search still works on all search nodes + assertBusy(() -> { assertSearchNodeDocCounts(10, TEST_INDEX); }, 30, TimeUnit.SECONDS); + + // Scale down search replicas while still in search-only mode + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(TEST_INDEX) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 2).build()) + .get() + ); + + ensureGreen(TEST_INDEX); + + // Verify search still works on all search nodes + assertBusy(() -> { assertSearchNodeDocCounts(10, TEST_INDEX); }, 30, TimeUnit.SECONDS); + + // Test cluster health when one search replica 
is down + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodesWithSearchOnlyReplicas()[0])); + + assertEquals( + "Index health should be YELLOW with one search replica down", + "YELLOW", + client().admin().cluster().prepareHealth(TEST_INDEX).get().getStatus().name() + ); + + // Start a replacement search node and wait for recovery + internalCluster().startSearchOnlyNode(); + ensureGreen(TEST_INDEX); + + // Scale back up to normal mode + assertAcked(client().admin().indices().prepareScaleSearchOnly(TEST_INDEX, false).get()); + ensureGreen(TEST_INDEX); + + // Verify search-only setting is disabled + settingsResponse = client().admin().indices().prepareGetSettings(TEST_INDEX).get(); + assertFalse(settingsResponse.getSetting(TEST_INDEX, IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey()).equals("true")); + + // Verify search still works after scale-up + assertBusy(() -> { + SearchResponse response = client().prepareSearch(TEST_INDEX).get(); + assertHitCount(response, 10); + assertSearchNodeDocCounts(10, TEST_INDEX); + }, 30, TimeUnit.SECONDS); + + // Verify writes work again after scale-up + IndexResponse indexResponse = client().prepareIndex(TEST_INDEX) + .setId("new-doc") + .setSource("field1", "new-value") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + assertEquals(RestStatus.CREATED, indexResponse.status()); + + // Verify new document is searchable + assertBusy(() -> { + SearchResponse finalResponse = client().prepareSearch(TEST_INDEX).get(); + assertHitCount(finalResponse, 11); + assertSearchNodeDocCounts(11, TEST_INDEX); + }); + } + + /** + * Tests scaling down an index to search-only mode when there are no search replicas. + */ + public void testScaleDownValidationWithoutSearchReplicas() { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + internalCluster().startSearchOnlyNode(); + + Settings specificSettings = Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, 2) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + + createIndex(TEST_INDEX, specificSettings); + ensureYellow(TEST_INDEX); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().prepareScaleSearchOnly(TEST_INDEX, true).get() + ); + + assertTrue( + "Expected error about missing search replicas", + exception.getMessage().contains("Cannot scale to zero without search replicas for index:") + ); + } + + /** + * Scenario 1: Tests search-only replicas recovery with persistent data directory + * and cluster.remote_store.state.enabled=false + */ + public void testSearchOnlyRecoveryWithPersistentData() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + internalCluster().startSearchOnlyNode(); + + Settings specificSettings = Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, 2) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + + createIndex(TEST_INDEX, specificSettings); + ensureGreen(TEST_INDEX); + + assertAcked(client().admin().indices().prepareScaleSearchOnly(TEST_INDEX, true).get()); + + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + IndexRoutingTable routingTable = state.routingTable().index(TEST_INDEX); + + for (IndexShardRoutingTable shardTable : routingTable) { + assertNull("Primary should be null", shardTable.primaryShard()); + assertTrue("No writer 
replicas should exist", shardTable.writerReplicas().isEmpty()); + assertEquals( + "One search replica should be active", + 1, + shardTable.searchOnlyReplicas().stream().filter(ShardRouting::active).count() + ); + } + }); + } + + /** + * Scenario 2: Tests behavior with cluster.remote_store.state.enabled=true + * but without data directory preservation + */ + public void testClusterRemoteStoreStateEnabled() throws Exception { + Settings remoteStoreSettings = Settings.builder().put(nodeSettings(0)).put("cluster.remote_store.state.enabled", true).build(); + + internalCluster().startClusterManagerOnlyNode(remoteStoreSettings); + internalCluster().startDataOnlyNodes(2); + internalCluster().startSearchOnlyNode(); + + Settings specificSettings = Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, 2) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + + createIndex(TEST_INDEX, specificSettings); + ensureGreen(TEST_INDEX); + + assertAcked(client().admin().indices().prepareScaleSearchOnly(TEST_INDEX, true).get()); + + internalCluster().stopAllNodes(); + + internalCluster().startClusterManagerOnlyNode(remoteStoreSettings); + internalCluster().startDataOnlyNodes(2); + internalCluster().startSearchOnlyNode(); + + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + IndexRoutingTable routingTable = state.routingTable().index(TEST_INDEX); + + for (IndexShardRoutingTable shardTable : routingTable) { + assertTrue( + "Only search replicas should be active", + shardTable.searchOnlyReplicas().stream().anyMatch(ShardRouting::active) + ); + } + }); + } + + /** + * Scenario 3: Tests recovery with persistent data directory and remote store state + */ + public void testRecoveryWithPersistentDataAndRemoteStore() throws Exception { + Settings remoteStoreSettings = Settings.builder().put(nodeSettings(0)).put("cluster.remote_store.state.enabled", true).build(); + + internalCluster().startClusterManagerOnlyNode(remoteStoreSettings); + internalCluster().startDataOnlyNodes(2); + internalCluster().startSearchOnlyNode(); + + Settings specificSettings = Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, 2) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + + createIndex(TEST_INDEX, specificSettings); + ensureGreen(TEST_INDEX); + + internalCluster().fullRestart(); + + ensureGreen(TEST_INDEX); + assertAcked(client().admin().indices().prepareScaleSearchOnly(TEST_INDEX, true).get()); + + assertBusy(() -> { assertEquals("One search replica should be active", 1, findNodesWithSearchOnlyReplicas().length); }); + } + + /** + * Helper method to find all nodes that contain active search-only replica shards + * @return Array of node names that have active search-only replicas + */ + private String[] findNodesWithSearchOnlyReplicas() { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + IndexRoutingTable indexRoutingTable = state.routingTable().index(TEST_INDEX); + + // Use a set to avoid duplicates if multiple shards are on the same node + Set nodeNames = new HashSet<>(); + + for (IndexShardRoutingTable shardTable : indexRoutingTable) { + for (ShardRouting searchReplica : shardTable.searchOnlyReplicas()) { + if (searchReplica.active()) { + String nodeId = searchReplica.currentNodeId(); + nodeNames.add(state.nodes().get(nodeId).getName()); + } + } + } + + if (nodeNames.isEmpty()) { + throw new AssertionError("Could 
not find any node with active search-only replica"); + } + + return nodeNames.toArray(new String[0]); + } + + /** + * Assert that documents are accessible and have the expected count across all search nodes + * @param expectedDocCount Expected number of documents in the index + * @param index The index name to search + */ + protected void assertSearchNodeDocCounts(int expectedDocCount, String index) { + // Check on all nodes that have search-only replicas + String[] searchNodes = findNodesWithSearchOnlyReplicas(); + for (String node : searchNodes) { + assertHitCount(client(node).prepareSearch(index).setSize(0).get(), expectedDocCount); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java index af911162d4458..b544a8b602da6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java @@ -8,7 +8,11 @@ package org.opensearch.indices.replication; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.settings.Settings; @@ -99,6 +103,24 @@ public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_Resto assertTrue(exception.getMessage().contains(getSnapshotExceptionMessage(ReplicationType.SEGMENT, ReplicationType.DOCUMENT))); } + public void testRemoteStoreRestoreFailsForSearchOnlyIndex() throws Exception { + bootstrapIndexWithSearchReplicas(); + assertAcked(client().admin().indices().prepareScaleSearchOnly(INDEX_NAME, true).get()); + + GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings(INDEX_NAME).get(); + assertEquals("true", settingsResponse.getSetting(INDEX_NAME, IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey())); + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { + PlainActionFuture future = PlainActionFuture.newFuture(); + client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME), future); + future.actionGet(); + }); + + assertTrue( + exception.getMessage().contains("Cannot use _remotestore/_restore on search_only mode enabled index [" + INDEX_NAME + "].") + ); + } + private void bootstrapIndexWithOutSearchReplicas(ReplicationType replicationType) throws InterruptedException { internalCluster().startNodes(2); diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index f1cc400d1a4f8..fa504466e0a4a 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -184,6 +184,8 @@ import org.opensearch.action.admin.indices.resolve.ResolveIndexAction; import org.opensearch.action.admin.indices.rollover.RolloverAction; import org.opensearch.action.admin.indices.rollover.TransportRolloverAction; +import 
org.opensearch.action.admin.indices.scale.searchonly.ScaleIndexAction; +import org.opensearch.action.admin.indices.scale.searchonly.TransportScaleIndexAction; import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction; import org.opensearch.action.admin.indices.segments.PitSegmentsAction; import org.opensearch.action.admin.indices.segments.TransportIndicesSegmentsAction; @@ -430,6 +432,7 @@ import org.opensearch.rest.action.admin.indices.RestResolveIndexAction; import org.opensearch.rest.action.admin.indices.RestResumeIngestionAction; import org.opensearch.rest.action.admin.indices.RestRolloverIndexAction; +import org.opensearch.rest.action.admin.indices.RestScaleIndexAction; import org.opensearch.rest.action.admin.indices.RestSimulateIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestSimulateTemplateAction; import org.opensearch.rest.action.admin.indices.RestSyncedFlushAction; @@ -696,6 +699,7 @@ public void reg actions.register(AutoPutMappingAction.INSTANCE, TransportAutoPutMappingAction.class); actions.register(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class); actions.register(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class); + actions.register(ScaleIndexAction.INSTANCE, TransportScaleIndexAction.class); actions.register(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class); actions.register(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class); actions.register(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class); @@ -923,7 +927,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestUpgradeAction()); registerHandler.accept(new RestUpgradeStatusAction()); registerHandler.accept(new RestClearIndicesCacheAction()); - + registerHandler.accept(new RestScaleIndexAction()); registerHandler.accept(new RestIndexAction()); registerHandler.accept(new CreateHandler()); registerHandler.accept(new AutoIdHandler(nodesInCluster)); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexAction.java new file mode 100644 index 0000000000000..9b79dd7bba632 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexAction.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; + +/** + * Action type for search-only scale operations on indices. + *

+ * This action type represents the administrative operation to transition indices + * between normal operation and search-only mode. It provides the action name constant + * used for transport actions and request routing. The action produces an + * {@link AcknowledgedResponse} to indicate success or failure. + *
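For orientation, the end-to-end usage this action enables looks roughly like the sketch below. It mirrors the prepareScaleSearchOnly(...) calls exercised in ScaleIndexIT; the Client import path is an assumption about the 3.0 client package layout, and error handling is omitted.

    import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
    import org.opensearch.transport.client.Client;

    final class ScaleUsageSketch {
        // Scale an index down to search-only mode, then back to normal read-write operation.
        static void scale(Client client, String index) {
            AcknowledgedResponse down = client.admin().indices().prepareScaleSearchOnly(index, true).get();
            assert down.isAcknowledged();

            // ... searches continue to be served by the search replicas ...

            AcknowledgedResponse up = client.admin().indices().prepareScaleSearchOnly(index, false).get();
            assert up.isAcknowledged();
        }
    }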

+ * The action is registered with the name "indices:admin/scale/search_only" in the + * transport action registry. + */ +public class ScaleIndexAction extends ActionType { + /** + * Singleton instance of the SearchOnlyScaleAction. + */ + public static final ScaleIndexAction INSTANCE = new ScaleIndexAction(); + + /** + * The name of this action, used for transport action registration and routing. + *

+ * This action name follows the OpenSearch convention of prefixing administrative + * index actions with "indices:admin/". + */ + public static final String NAME = "indices:admin/scale/search_only"; + + /** + * Private constructor to enforce singleton pattern. + *

+ * Initializes the action with the appropriate name and response reader. + */ + private ScaleIndexAction() { + super(NAME, AcknowledgedResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexClusterStateBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexClusterStateBuilder.java new file mode 100644 index 0000000000000..e376e766e29fd --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexClusterStateBuilder.java @@ -0,0 +1,273 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlock; +import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.repositories.IndexId; + +import java.util.EnumSet; +import java.util.Map; +import java.util.UUID; + +import static org.opensearch.cluster.block.ClusterBlockLevel.WRITE; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_SEARCH_ONLY_BLOCK; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_SEARCH_ONLY_BLOCK_ID; + +/** + * Utility class responsible for constructing new cluster states during scale operations. + *

+ * This builder constructs cluster states for both scale-up and scale-down operations, handling + * the transition of indices between normal operation and search-only mode. It manages: + *

    + *
+ *   • Adding temporary write blocks during scale-down preparation
+ *   • Updating index metadata and settings when finalizing scale operations
+ *   • Modifying routing tables to add/remove shards based on scaling direction
+ *

+ * The cluster state modifications ensure proper synchronization of operations and maintain + * data integrity throughout the scaling process. + */ +class ScaleIndexClusterStateBuilder { + + /** + * Builds the new cluster state by adding a temporary scale-down block on the target index. + *
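For callers, the practical effect of the scale-down blocks is that indexing requests are rejected. The sketch below is illustrative only; it is based on the failure message asserted in ScaleIndexIT, not on a specific exception type, and reuses the assumed Client import from the earlier sketch.

    import org.opensearch.transport.client.Client;

    final class WriteAfterScaleDownSketch {
        // Expected failure mode once the scale-down / search-only block is in place.
        static void tryWrite(Client client, String index) {
            try {
                client.prepareIndex(index).setId("1").setSource("field1", "value1").get();
            } catch (Exception e) {
                // ScaleIndexIT asserts the message contains:
                //   "blocked by: [FORBIDDEN/20/index scaled down]"
                System.err.println("write rejected: " + e.getMessage());
            }
        }
    }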

+ * This temporary block prevents writes to the index during the preparation phase of scaling down, + * allowing existing operations to complete before transitioning to search-only mode. + * + * @param currentState the current cluster state + * @param index the name of the index being scaled down + * @param blockedIndices map to store the association between indices and their scale blocks + * @return the modified cluster state with temporary write blocks applied + */ + ClusterState buildScaleDownState(ClusterState currentState, String index, Map blockedIndices) { + Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); + ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks()); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); + + IndexMetadata indexMetadata = currentState.metadata().index(index); + Index idx = indexMetadata.getIndex(); + ClusterBlock scaleBlock = createScaleDownBlock(); + + blocksBuilder.addIndexBlock(index, scaleBlock); + blockedIndices.put(idx, scaleBlock); + + return ClusterState.builder(currentState) + .metadata(metadataBuilder) + .blocks(blocksBuilder) + .routingTable(routingTableBuilder.build()) + .build(); + } + + /** + * Builds the final cluster state for completing a scale-down operation. + *
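Applying this final state marks the index as search-only in its settings. A minimal sketch of how that can be confirmed, mirroring the check used in ScaleIndexIT:

    import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse;
    import org.opensearch.cluster.metadata.IndexMetadata;
    import org.opensearch.transport.client.Client;

    final class SearchOnlyCheckSketch {
        // True once the index has been finalized into search-only mode.
        static boolean isSearchOnly(Client client, String index) {
            GetSettingsResponse response = client.admin().indices().prepareGetSettings(index).get();
            return "true".equals(response.getSetting(index, IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey()));
        }
    }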

+ * This state modification: + *

    + *
+ *   • Removes the temporary scale-down block
+ *   • Updates index metadata to mark it as search-only
+ *   • Adds a permanent search-only block
+ *   • Updates the routing table to maintain only search replicas
+ * + * @param currentState the current cluster state + * @param index the name of the index being scaled down + * @return the modified cluster state with finalized search-only configuration + * @throws IllegalStateException if the specified index is not found + */ + ClusterState buildFinalScaleDownState(ClusterState currentState, String index) { + ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks()); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); + Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); + + IndexMetadata indexMetadata = currentState.metadata().index(index); + if (indexMetadata == null) { + throw new IllegalStateException("Index " + index + " not found"); + } + + blocksBuilder.removeIndexBlockWithId(index, INDEX_SEARCH_ONLY_BLOCK_ID); + + Settings updatedSettings = Settings.builder() + .put(indexMetadata.getSettings()) + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), true) + .build(); + + metadataBuilder.put( + IndexMetadata.builder(indexMetadata).settings(updatedSettings).settingsVersion(indexMetadata.getSettingsVersion() + 1) + ); + + blocksBuilder.addIndexBlock(index, INDEX_SEARCH_ONLY_BLOCK); + + updateRoutingTableForScaleDown(routingTableBuilder, currentState, index); + + return ClusterState.builder(currentState) + .metadata(metadataBuilder) + .blocks(blocksBuilder) + .routingTable(routingTableBuilder.build()) + .build(); + } + + /** + * Updates the routing table for a scale-down operation, removing non-search-only shards. + *
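The resulting allocation can be observed from the cluster state. The sketch below restates the routing-table assertions made in ScaleIndexIT after a scale-down; it is illustrative, not part of the diff.

    import org.opensearch.cluster.ClusterState;
    import org.opensearch.cluster.routing.IndexShardRoutingTable;
    import org.opensearch.cluster.routing.ShardRouting;
    import org.opensearch.transport.client.Client;

    final class ScaledDownRoutingSketch {
        // After scale-down: no primary, no writer replicas, only active search replicas.
        static void verify(Client client, String index) {
            ClusterState state = client.admin().cluster().prepareState().get().getState();
            for (IndexShardRoutingTable shardTable : state.routingTable().index(index)) {
                assert shardTable.primaryShard() == null;
                assert shardTable.writerReplicas().isEmpty();
                assert shardTable.searchOnlyReplicas().stream().anyMatch(ShardRouting::active);
            }
        }
    }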

+ * This method preserves only the search-only replica shards in the routing table, + * effectively removing primary shards and standard replicas from the allocation. + * + * @param routingTableBuilder the routing table builder to modify + * @param currentState the current cluster state + * @param index the name of the index being scaled down + */ + private void updateRoutingTableForScaleDown(RoutingTable.Builder routingTableBuilder, ClusterState currentState, String index) { + IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index); + if (indexRoutingTable != null) { + IndexRoutingTable.Builder indexBuilder = new IndexRoutingTable.Builder(indexRoutingTable.getIndex()); + for (IndexShardRoutingTable shardTable : indexRoutingTable) { + IndexShardRoutingTable.Builder shardBuilder = new IndexShardRoutingTable.Builder(shardTable.shardId()); + for (ShardRouting shardRouting : shardTable) { + if (shardRouting.isSearchOnly()) { + shardBuilder.addShard(shardRouting); + } + } + indexBuilder.addIndexShard(shardBuilder.build()); + } + routingTableBuilder.add(indexBuilder.build()); + } + } + + /** + * Builds a new routing table for scaling up an index from search-only mode to normal operation. + *

+ * This method: + *

    + *
+ *   • Preserves existing search-only replicas
+ *   • Creates new unassigned primary shards with remote store recovery source
+ *   • Creates new unassigned replica shards configured for peer recovery
+ *

+ * The resulting routing table allows the cluster allocator to restore normal index operation + * by recovering shards from remote storage. + * + * @param currentState the current cluster state + * @param index the name of the index being scaled up + * @return the modified routing table with newly added primary and replica shards + */ + RoutingTable buildScaleUpRoutingTable(ClusterState currentState, String index) { + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); + IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index); + IndexMetadata indexMetadata = currentState.metadata().index(index); + + if (indexRoutingTable != null && indexMetadata != null) { + IndexRoutingTable.Builder indexBuilder = new IndexRoutingTable.Builder(indexRoutingTable.getIndex()); + + for (IndexShardRoutingTable shardTable : indexRoutingTable) { + indexBuilder.addIndexShard(buildShardTableForScaleUp(shardTable, indexMetadata)); + } + routingTableBuilder.add(indexBuilder.build()); + } + + return routingTableBuilder.build(); + } + + /** + * Builds a shard routing table for a scale-up operation. + *

+ * For each shard, this method: + *

    + *
+ *   • Preserves all existing search-only replicas
+ *   • Creates a new unassigned primary shard configured to recover from remote store
+ *   • Creates a new unassigned replica shard configured to recover from peers
+ * + * @param shardTable the current shard routing table + * @return a new shard routing table with both search replicas and newly added shards + */ + + private IndexShardRoutingTable buildShardTableForScaleUp(IndexShardRoutingTable shardTable, IndexMetadata indexMetadata) { + IndexShardRoutingTable.Builder shardBuilder = new IndexShardRoutingTable.Builder(shardTable.shardId()); + + // Keep existing search-only shards + for (ShardRouting shardRouting : shardTable) { + if (shardRouting.isSearchOnly()) { + shardBuilder.addShard(shardRouting); + } + } + + RecoverySource.RemoteStoreRecoverySource remoteStoreRecoverySource = new RecoverySource.RemoteStoreRecoverySource( + UUID.randomUUID().toString(), + Version.CURRENT, + new IndexId(shardTable.shardId().getIndex().getName(), shardTable.shardId().getIndex().getUUID()) + ); + + // Get replica settings from index metadata + int numberOfReplicas = indexMetadata.getNumberOfReplicas(); + + // Create primary shard + ShardRouting primaryShard = ShardRouting.newUnassigned( + shardTable.shardId(), + true, + remoteStoreRecoverySource, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "Restoring primary shard") + ); + shardBuilder.addShard(primaryShard); + + // Create the correct number of replica shards + for (int i = 0; i < numberOfReplicas; i++) { + ShardRouting replicaShard = ShardRouting.newUnassigned( + shardTable.shardId(), + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "Restoring replica shard") + ); + shardBuilder.addShard(replicaShard); + } + + return shardBuilder.build(); + } + + /** + * Creates a temporary cluster block used during scale-down preparation. + *

+ * This block: + *

    + *
+ *   • Prevents write operations to the index
+ *   • Uses a unique ID to track the block through the scaling process
+ *   • Returns a 403 Forbidden status for write attempts
+ *   • Includes a descriptive message
+ * + * @return a cluster block for temporary use during scale-down + */ + static ClusterBlock createScaleDownBlock() { + return new ClusterBlock( + INDEX_SEARCH_ONLY_BLOCK_ID, + UUIDs.randomBase64UUID(), + "index preparing to scale down", + false, + false, + false, + RestStatus.FORBIDDEN, + EnumSet.of(WRITE) + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeRequest.java new file mode 100644 index 0000000000000..73ef7d1b97fc6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeRequest.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.List; + +/** + * A transport request sent to nodes to facilitate shard synchronization during search-only scaling operations. + *

+ * This request is sent from the cluster manager to data nodes that host primary shards for the target index + * during scale operations. It contains the index name and a list of shard IDs that need to be synchronized + * before completing a scale-down operation. + *
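A hypothetical round-trip check, in the style of the ScaleIndexNodeRequestTests added later in this patch (whose contents are not shown here), illustrates how the request travels over the wire. It relies only on the constructor and stream methods defined below and assumes the usual BytesStreamOutput test helper.

    import java.util.List;

    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.core.index.Index;
    import org.opensearch.core.index.shard.ShardId;

    // Same-package sketch: the request class is package-private.
    final class NodeRequestRoundTripSketch {
        static void roundTrip() throws Exception {
            ShardId shardId = new ShardId(new Index("logs", "uuid"), 0);
            ScaleIndexNodeRequest original = new ScaleIndexNodeRequest("logs", List.of(shardId));

            BytesStreamOutput out = new BytesStreamOutput();
            original.writeTo(out);

            ScaleIndexNodeRequest copy = new ScaleIndexNodeRequest(out.bytes().streamInput());
            assert copy.getIndex().equals(original.getIndex());
            assert copy.getShardIds().equals(original.getShardIds());
        }
    }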

+ * When a node receives this request, it performs final sync and flush operations on the specified shards, + * ensuring all operations are committed and the remote store is synced. This is a crucial step in + * the scale-down process to ensure no data loss occurs when the index transitions to search-only mode. + */ +class ScaleIndexNodeRequest extends TransportRequest { + private final String index; + private final List shardIds; + + /** + * Constructs a new NodeSearchOnlyRequest. + * + * @param index the name of the index being scaled + * @param shardIds the list of shard IDs to be synchronized on the target node + */ + ScaleIndexNodeRequest(String index, List shardIds) { + this.index = index; + this.shardIds = shardIds; + } + + /** + * Deserialization constructor. + * + * @param in the stream input to read from + * @throws IOException if there is an I/O error during deserialization + */ + ScaleIndexNodeRequest(StreamInput in) throws IOException { + super(in); + this.index = in.readString(); + this.shardIds = in.readList(ShardId::new); + } + + /** + * Serializes this request to the given output stream. + * + * @param out the output stream to write to + * @throws IOException if there is an I/O error during serialization + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(index); + out.writeList(shardIds); + } + + /** + * Returns the index name associated with this request. + * + * @return the index name + */ + String getIndex() { + return index; + } + + /** + * Returns the list of shard IDs to be synchronized. + * + * @return the list of shard IDs + */ + List getShardIds() { + return shardIds; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeResponse.java new file mode 100644 index 0000000000000..5b0f6846d8c3b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeResponse.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; + +import java.io.IOException; +import java.util.List; + +/** + * Response sent from nodes after processing a {@link ScaleIndexNodeRequest} during search-only scaling operations. + *

+ * This response contains information about the node that processed the request and the results of + * synchronization attempts for each requested shard. The cluster manager uses these responses to + * determine whether it's safe to proceed with finalizing a scale-down operation. + *

+ * Each response includes details about whether shards have any uncommitted operations or need + * additional synchronization, which would indicate the scale operation should be delayed until + * the cluster reaches a stable state. + */ +class ScaleIndexNodeResponse extends TransportResponse { + private final DiscoveryNode node; + private final List shardResponses; + + /** + * Constructs a new NodeSearchOnlyResponse. + * + * @param node the node that processed the synchronization request + * @param shardResponses the list of responses from individual shard synchronization attempts + */ + ScaleIndexNodeResponse(DiscoveryNode node, List shardResponses) { + this.node = node; + this.shardResponses = shardResponses; + } + + /** + * Deserialization constructor. + * + * @param in the stream input to read from + * @throws IOException if there is an I/O error during deserialization + */ + ScaleIndexNodeResponse(StreamInput in) throws IOException { + node = new DiscoveryNode(in); + shardResponses = in.readList(ScaleIndexShardResponse::new); + } + + /** + * Serializes this response to the given output stream. + * + * @param out the output stream to write to + * @throws IOException if there is an I/O error during serialization + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + node.writeTo(out); + out.writeList(shardResponses); + } + + /** + * Returns the node that processed the synchronization request. + * + * @return the discovery node information + */ + public DiscoveryNode getNode() { + return node; + } + + /** + * Returns the list of shard-level synchronization responses. + *

+ * These responses contain critical information about the state of each shard, + * including whether there are uncommitted operations or if additional synchronization + * is needed before the scale operation can safely proceed. + * + * @return the list of shard responses + */ + public List getShardResponses() { + return shardResponses; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexOperationValidator.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexOperationValidator.java new file mode 100644 index 0000000000000..a749f8b3311c9 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexOperationValidator.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.core.action.ActionListener; +import org.opensearch.indices.replication.common.ReplicationType; + +/** + * Validates that indices meet the prerequisites for search-only scale operations. + *

+ * This validator ensures that indexes being scaled up or down satisfy all the + * necessary conditions for a safe scaling operation. It checks for required settings, + * index state compatibility, and configuration prerequisites such as remote store + * and segment replication settings. + */ +class ScaleIndexOperationValidator { + + /** + * Validates that the given index meets the prerequisites for the scale operation. + *

+ * For scale-down operations, this method verifies: + *

    + *
+ *   • The index exists
+ *   • The index is not already in search-only mode
+ *   • The index has at least one search-only replica configured
+ *   • Remote store is enabled for the index
+ *   • Segment replication is enabled for the index
+ *
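Put differently, an index created with settings along the lines below would pass the scale-down checks above. This is a sketch built from the setting constants referenced by this validator and by ScaleIndexIT; any non-zero search-replica count satisfies the replica check.

    import org.opensearch.cluster.metadata.IndexMetadata;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.indices.replication.common.ReplicationType;

    final class ScaleDownPrerequisitesSketch {
        static Settings eligibleIndexSettings() {
            return Settings.builder()
                .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
                .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
                .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
                .build();
        }
    }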

+ * For scale-up operations, this method verifies: + *

    + *
+ *   • The index exists
+ *   • The index is currently in search-only mode
+ * + * @param indexMetadata the metadata of the index to validate + * @param index the name of the index being validated + * @param listener the action listener to notify in case of validation failure + * @param isScaleDown true if validating for scale-down, false for scale-up + * @return true if validation succeeds, false if validation fails (and listener is notified) + */ + boolean validateScalePrerequisites( + IndexMetadata indexMetadata, + String index, + ActionListener listener, + boolean isScaleDown + ) { + try { + if (indexMetadata == null) { + throw new IllegalArgumentException("Index [" + index + "] not found"); + } + if (isScaleDown) { + if (indexMetadata.getSettings().getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false)) { + throw new IllegalStateException("Index [" + index + "] is already in search-only mode"); + } + + if (indexMetadata.getNumberOfSearchOnlyReplicas() == 0) { + throw new IllegalArgumentException("Cannot scale to zero without search replicas for index: " + index); + } + if (indexMetadata.getSettings().getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) == false) { + throw new IllegalArgumentException( + "To scale to zero, " + IndexMetadata.SETTING_REMOTE_STORE_ENABLED + " must be enabled for index: " + index + ); + } + if (ReplicationType.SEGMENT.toString() + .equals(indexMetadata.getSettings().get(IndexMetadata.SETTING_REPLICATION_TYPE)) == false) { + throw new IllegalArgumentException("To scale to zero, segment replication must be enabled for index: " + index); + } + } else { + if (indexMetadata.getSettings().getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) == false) { + throw new IllegalStateException("Index [" + index + "] is not in search-only mode"); + } + } + return true; + } catch (Exception e) { + listener.onFailure(e); + return false; + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequest.java new file mode 100644 index 0000000000000..a8ec2754e8414 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequest.java @@ -0,0 +1,166 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ValidateActions; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request for search-only scale operations (up or down) on an index. + *

+ * This request represents an administrative operation to either: + *

    + *
+ *   • Scale an index down to search-only mode, removing write capability while preserving search replicas
+ *   • Scale an index up from search-only mode back to full read-write operation
+ *

+ * The request is processed by the cluster manager node, which coordinates the necessary + * cluster state changes, shard synchronization, and recovery operations needed to transition + * an index between normal and search-only states. + */ +class ScaleIndexRequest extends AcknowledgedRequest { + private final String index; + private boolean scaleDown; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + /** + * Constructs a new SearchOnlyRequest. + * + * @param index the name of the index to scale + * @param scaleDown true to scale down to search-only mode, false to scale up to normal operation + */ + ScaleIndexRequest(String index, boolean scaleDown) { + super(); + this.index = index; + this.scaleDown = scaleDown; + } + + /** + * Deserialization constructor. + * + * @param in the stream input to read from + * @throws IOException if there is an I/O error during deserialization + */ + ScaleIndexRequest(StreamInput in) throws IOException { + super(in); + this.index = in.readString(); + this.scaleDown = in.readBoolean(); + this.indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + /** + * Validates this request. + *
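For reference, a same-package sketch of the behaviour described below, in the spirit of the ScaleIndexRequestTests added by this patch:

    import org.opensearch.action.ActionRequestValidationException;

    final class RequestValidationSketch {
        static void validateExamples() {
            ActionRequestValidationException err = new ScaleIndexRequest("", true).validate();
            assert err != null && err.validationErrors().contains("index is required");

            assert new ScaleIndexRequest("logs", true).validate() == null;
        }
    }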

+ * Ensures that the index name is provided and not empty. + * + * @return validation exception if invalid, null otherwise + */ + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (index == null || index.trim().isEmpty()) { + validationException = ValidateActions.addValidationError("index is required", validationException); + } + return validationException; + } + + /** + * Serializes this request to the given output stream. + * + * @param out the output stream to write to + * @throws IOException if there is an I/O error during serialization + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(index); + out.writeBoolean(scaleDown); + indicesOptions.writeIndicesOptions(out); + } + + /** + * Checks if this request equals another object. + * + * @param o the object to compare with + * @return true if equal, false otherwise + */ + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ScaleIndexRequest that = (ScaleIndexRequest) o; + return scaleDown == that.scaleDown && Objects.equals(index, that.index) && Objects.equals(indicesOptions, that.indicesOptions); + } + + /** + * Returns a hash code for this request. + * + * @return the hash code + */ + @Override + public int hashCode() { + return Objects.hash(index, scaleDown, indicesOptions); + } + + /** + * Returns the name of the index to scale. + * + * @return the index name + */ + public String getIndex() { + return index; + } + + /** + * Returns whether this is a scale-down operation. + * + * @return true if scaling down to search-only mode, false if scaling up to normal operation + */ + public boolean isScaleDown() { + return scaleDown; + } + + /** + * Returns the indices options that specify how to resolve indices and handle unavailable indices. + * + * @return the indices options + */ + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + /** + * Sets the indices options for this request. + * + * @param indicesOptions the indices options to use + * @return this request (for method chaining) + */ + public ScaleIndexRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + /** + * Sets whether this is a scale down operation. + * + * @param scaleDown true if scaling down, false if scaling up + * @return this request (for method chaining) + */ + public ScaleIndexRequest scaleDown(boolean scaleDown) { + this.scaleDown = scaleDown; + return this; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequestBuilder.java new file mode 100644 index 0000000000000..f18cdbe5c19a0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequestBuilder.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.action.ActionRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.transport.client.OpenSearchClient; + +/** + * A builder for constructing {@link ScaleIndexRequest} objects to perform search-only scale operations. + *

+ * This builder simplifies the creation of requests to scale indices up or down for search-only mode. + * It provides methods to configure the scaling direction and follows the builder pattern to allow + * method chaining for constructing requests. + *

+ * The builder is part of the public API since OpenSearch 3.0.0. + */ +@PublicApi(since = "3.0.0") +public class ScaleIndexRequestBuilder extends ActionRequestBuilder { + + /** + * Constructs a new builder for scaling an index, allowing explicit direction specification. + * + * @param client the client to use for executing the request + * @param searchOnly true for scaling down to search-only mode, false for scaling up to normal mode + * @param index the name of the index to scale + */ + public ScaleIndexRequestBuilder(OpenSearchClient client, boolean searchOnly, String index) { + super(client, ScaleIndexAction.INSTANCE, new ScaleIndexRequest(index, searchOnly)); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexResponse.java new file mode 100644 index 0000000000000..a81551a3c9898 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexResponse.java @@ -0,0 +1,136 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; + +/** + * Response for search-only scale operations, containing information about shards' synchronization status. + *

+ * This response aggregates the results of shard synchronization attempts from multiple nodes + * during a scale operation. It tracks: + *

+ * <ul>
+ *   <li>Whether any shards have uncommitted operations</li>
+ *   <li>Whether any shards still need synchronization</li>
+ *   <li>Detailed failure reasons if the scale operation cannot proceed</li>
+ * </ul>
+ * <p>
+ * The response is used by the cluster manager to determine whether a scale operation + * can be finalized or needs to be retried after more time is allowed for synchronization. + */ +class ScaleIndexResponse extends ActionResponse implements ToXContent { + private final Collection nodeResponses; + private final String failureReason; + private final boolean hasFailures; + + /** + * Constructs a new SearchOnlyResponse by aggregating responses from multiple nodes. + *

+ * This constructor analyzes the responses to determine if any shards report conditions + * that would prevent safely finalizing a scale operation, such as uncommitted operations + * or pending synchronization tasks. + * + * @param responses the collection of node responses containing shard status information + */ + ScaleIndexResponse(Collection responses) { + this.nodeResponses = responses; + this.hasFailures = responses.stream() + .anyMatch(r -> r.getShardResponses().stream().anyMatch(s -> s.hasUncommittedOperations() || s.needsSync())); + this.failureReason = buildFailureReason(); + } + + /** + * Serializes this response to the given output stream. + * + * @param out the output stream to write to + * @throws IOException if there is an I/O error during serialization + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(new ArrayList<>(nodeResponses)); + out.writeBoolean(hasFailures); + out.writeOptionalString(failureReason); + } + + /** + * Indicates whether any shards reported conditions that would prevent + * safely finalizing the scale operation. + * + * @return true if any shard has uncommitted operations or needs sync, false otherwise + */ + boolean hasFailures() { + return hasFailures; + } + + /** + * Builds a detailed description of failure reasons if the scale operation cannot proceed. + *

+ * This method constructs a human-readable string explaining which shards on which nodes + * reported conditions that prevent the scale operation from being finalized, including + * whether they have uncommitted operations or need additional synchronization. + * + * @return a detailed failure description, or null if no failures were detected + */ + String buildFailureReason() { + if (!hasFailures) { + return null; + } + StringBuilder reason = new StringBuilder(); + for (ScaleIndexNodeResponse nodeResponse : nodeResponses) { + for (ScaleIndexShardResponse shardResponse : nodeResponse.getShardResponses()) { + if (shardResponse.hasUncommittedOperations() || shardResponse.needsSync()) { + reason.append("Shard ") + .append(shardResponse.getShardId()) + .append(" on node ") + .append(nodeResponse.getNode()) + .append(": "); + if (shardResponse.hasUncommittedOperations()) { + reason.append("has uncommitted operations "); + } + if (shardResponse.needsSync()) { + reason.append("needs sync "); + } + reason.append("; "); + } + } + } + return reason.toString(); + } + + /** + * Converts this response to XContent format for API responses. + *

+ * The generated content includes: + *

+ * <ul>
+ *   <li>Whether any failures were detected</li>
+ *   <li>Detailed failure reasons if applicable</li>
+ * </ul>
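+ * <p>
+ * For illustration, a response for a shard that still has uncommitted operations might render as
+ * the following (a sketch; actual shard and node identifiers depend on the cluster):
+ * <pre>
+ * { "failure_reason": "Shard [my-index][0] on node {node-1}: has uncommitted operations ; " }
+ * </pre>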
+ * + * @param builder the XContentBuilder to use + * @param params parameters for XContent generation + * @return the XContentBuilder with response data added + * @throws IOException if there is an error generating the XContent + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + if (failureReason != null) { + builder.field("failure_reason", failureReason); + } + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardResponse.java new file mode 100644 index 0000000000000..8c7759e44e12f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardResponse.java @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; + +import java.io.IOException; + +/** + * Response containing synchronization status for a single shard during search-only scale operations. + *

+ * This response captures the critical state information needed to determine if a shard + * is ready for a scale operation to proceed, including: + *

+ * <ul>
+ *   <li>Whether the shard has uncommitted operations that need to be persisted</li>
+ *   <li>Whether the shard needs additional synchronization with remote storage</li>
+ * </ul>
+ * <p>
+ * The cluster manager uses this information from all primary shards to decide + * whether it's safe to finalize a scale-down operation. + */ +class ScaleIndexShardResponse implements Writeable { + private final ShardId shardId; + private final boolean needsSync; + private final int uncommittedOperations; + + /** + * Constructs a new ShardSearchOnlyResponse. + * + * @param shardId the ID of the shard that was synchronized + * @param needsSync whether the shard still needs additional synchronization + * @param uncommittedOperations the number of operations not yet committed to the transaction log + */ + ScaleIndexShardResponse(ShardId shardId, boolean needsSync, int uncommittedOperations) { + this.shardId = shardId; + this.needsSync = needsSync; + this.uncommittedOperations = uncommittedOperations; + } + + /** + * Deserialization constructor. + * + * @param in the stream input to read from + * @throws IOException if there is an I/O error during deserialization + */ + ScaleIndexShardResponse(StreamInput in) throws IOException { + this.shardId = new ShardId(in); + this.needsSync = in.readBoolean(); + this.uncommittedOperations = in.readVInt(); + } + + /** + * Serializes this response to the given output stream. + * + * @param out the output stream to write to + * @throws IOException if there is an I/O error during serialization + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + shardId.writeTo(out); + out.writeBoolean(needsSync); + out.writeVInt(uncommittedOperations); + } + + /** + * Returns the shard ID associated with this response. + * + * @return the shard ID + */ + ShardId getShardId() { + return shardId; + } + + /** + * Indicates whether the shard needs additional synchronization before scaling. + *

+ * A shard may need synchronization if: + *

+ * <ul>
+ *   <li>It has pending operations that need to be synced to remote storage</li>
+ *   <li>The local and remote states don't match</li>
+ * </ul>
+ * + * @return true if additional synchronization is needed, false otherwise + */ + boolean needsSync() { + return needsSync; + } + + /** + * Indicates whether the shard has operations that haven't been committed to the transaction log. + *

+ * Uncommitted operations represent recent writes that haven't been fully persisted, + * making it unsafe to proceed with a scale-down operation until they are committed. + * + * @return true if there are uncommitted operations, false otherwise + */ + boolean hasUncommittedOperations() { + return uncommittedOperations > 0; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardSyncManager.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardSyncManager.java new file mode 100644 index 0000000000000..596bbc2c5270f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardSyncManager.java @@ -0,0 +1,217 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Manages shard synchronization for scale operations + */ +/** + * Manages shard synchronization operations during search-only scaling. + *

+ * This manager coordinates the necessary synchronization across nodes to ensure all shards + * are in a consistent state before finalizing scale operations. It handles: + *

+ * <ul>
+ *   <li>Dispatching sync requests to nodes hosting primary shards</li>
+ *   <li>Aggregating responses from multiple nodes</li>
+ *   <li>Validating that shards are ready for scale operations</li>
+ *   <li>Tracking primary shard assignments across the cluster</li>
+ * </ul>
+ * <p>
+ * The synchronization process is a critical safety mechanism that prevents data loss + * during transitions between normal and search-only modes. + */ +class ScaleIndexShardSyncManager { + + private final ClusterService clusterService; + private final TransportService transportService; + private final String transportActionName; + + /** + * Constructs a new ShardSyncManager. + * + * @param clusterService the cluster service for accessing cluster state + * @param transportService the transport service for sending requests to other nodes + * @param transportActionName the transport action name for shard sync requests + */ + ScaleIndexShardSyncManager(ClusterService clusterService, TransportService transportService, String transportActionName) { + this.clusterService = clusterService; + this.transportService = transportService; + this.transportActionName = transportActionName; + } + + /** + * Sends shard sync requests to each node that holds a primary shard. + *

+ * This method: + *

+ * <ul>
+ *   <li>Groups shards by node to minimize network requests</li>
+ *   <li>Creates a grouped listener to aggregate responses</li>
+ *   <li>Dispatches sync requests to each relevant node</li>
+ * </ul>
+ * <p>
+ * The listener is notified once all nodes have responded or if any errors occur. + * + * @param index the name of the index being scaled + * @param primaryShardsNodes map of shard IDs to node IDs for all primary shards + * @param listener the listener to notify when all nodes have responded + */ + void sendShardSyncRequests( + String index, + Map primaryShardsNodes, + ActionListener> listener + ) { + if (primaryShardsNodes.isEmpty()) { + listener.onFailure(new IllegalStateException("No primary shards found for index " + index)); + return; + } + + Map> nodeShardGroups = primaryShardsNodes.entrySet() + .stream() + .collect(Collectors.groupingBy(Map.Entry::getValue, Collectors.mapping(Map.Entry::getKey, Collectors.toList()))); + + final GroupedActionListener groupedListener = new GroupedActionListener<>(listener, nodeShardGroups.size()); + + for (Map.Entry> entry : nodeShardGroups.entrySet()) { + final String nodeId = entry.getKey(); + final List shards = entry.getValue(); + final DiscoveryNode targetNode = clusterService.state().nodes().get(nodeId); + + if (targetNode == null) { + groupedListener.onFailure(new IllegalStateException("Node [" + nodeId + "] not found")); + continue; + } + + sendNodeRequest(targetNode, index, shards, groupedListener); + } + } + + /** + * Sends a sync request to a specific node for the given shards. + *

+ * This method creates and sends a transport request to perform shard synchronization + * on a target node, registering appropriate response and error handlers. + * + * @param targetNode the node to send the request to + * @param index the name of the index being scaled + * @param shards the list of shards to synchronize on the target node + * @param listener the listener to notify with the response + */ + void sendNodeRequest(DiscoveryNode targetNode, String index, List shards, ActionListener listener) { + transportService.sendRequest( + targetNode, + transportActionName, + new ScaleIndexNodeRequest(index, shards), + new TransportResponseHandler() { + @Override + public ScaleIndexNodeResponse read(StreamInput in) throws IOException { + return new ScaleIndexNodeResponse(in); + } + + @Override + public void handleResponse(ScaleIndexNodeResponse response) { + listener.onResponse(response); + } + + @Override + public void handleException(TransportException exp) { + listener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + } + ); + } + + /** + * Aggregates node responses and verifies that no shard reports uncommitted operations or a pending sync. + *

+ * This validation ensures that all shards are in a consistent state before proceeding with + * a scale operation. If any shard reports conditions that would make scaling unsafe, the + * operation is failed with detailed information about which shards need more time. + * + * @param responses the collection of responses from all nodes + * @param listener the listener to notify with the aggregated result + */ + void validateNodeResponses(Collection responses, ActionListener listener) { + boolean hasUncommittedOps = false; + boolean needsSync = false; + List failedShards = new ArrayList<>(); + + for (ScaleIndexNodeResponse nodeResponse : responses) { + for (ScaleIndexShardResponse shardResponse : nodeResponse.getShardResponses()) { + if (shardResponse.hasUncommittedOperations()) { + hasUncommittedOps = true; + failedShards.add(shardResponse.getShardId().toString()); + } + if (shardResponse.needsSync()) { + needsSync = true; + failedShards.add(shardResponse.getShardId().toString()); + } + } + } + + if (hasUncommittedOps || needsSync) { + String errorDetails = "Pre-scale sync failed for shards: " + + String.join(", ", failedShards) + + (hasUncommittedOps ? " - uncommitted operations remain" : "") + + (needsSync ? " - sync needed" : ""); + listener.onFailure(new IllegalStateException(errorDetails)); + } else { + listener.onResponse(new ScaleIndexResponse(responses)); + } + } + + /** + * Returns the primary shard node assignments for a given index. + *

+ * Builds a mapping between shard IDs and the node IDs hosting their primary copies. + * This mapping is used to determine which nodes need to be contacted for shard + * synchronization during scale operations. + * + * @param indexMetadata the metadata of the index + * @param state the current cluster state + * @return a map of shard IDs to node IDs for all assigned primary shards + */ + Map getPrimaryShardAssignments(IndexMetadata indexMetadata, ClusterState state) { + Map assignments = new HashMap<>(); + for (int i = 0; i < indexMetadata.getNumberOfShards(); i++) { + ShardId shardId = new ShardId(indexMetadata.getIndex(), i); + ShardRouting primaryShard = state.routingTable().index(indexMetadata.getIndex().getName()).shard(i).primaryShard(); + if (primaryShard != null && primaryShard.assignedToNode()) { + assignments.put(shardId, primaryShard.currentNodeId()); + } + } + return assignments; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/TransportScaleIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/TransportScaleIndexAction.java new file mode 100644 index 0000000000000..7384f35a600f7 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/TransportScaleIndexAction.java @@ -0,0 +1,481 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.ChannelActionListener; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.block.ClusterBlock; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collection; +import 
java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_SEARCH_ONLY_BLOCK_ID; + +/** + * Transport action implementation for search-only scale operations. + *

+ * This class coordinates the entire process of scaling indices up or down between normal + * and search-only modes. It manages the multistep process including: + *

+ * <ul>
+ *   <li>Validating prerequisites for scale operations</li>
+ *   <li>Adding temporary write blocks during scale-down preparation</li>
+ *   <li>Coordinating shard synchronization across nodes</li>
+ *   <li>Modifying cluster state to transition indices between modes</li>
+ *   <li>Handling synchronization requests from other nodes</li>
+ * </ul>
+ * <p>
+ * The scale operation is implemented as a series of cluster state update tasks to ensure + * atomicity and consistency throughout the transition. + */ +public class TransportScaleIndexAction extends TransportClusterManagerNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportScaleIndexAction.class); + /** + * Transport action name for shard sync requests + */ + public static final String NAME = ScaleIndexAction.NAME + "[s]"; + + public static final String SHARD_SYNC_EXECUTOR = ThreadPool.Names.MANAGEMENT; + + private final AllocationService allocationService; + private final IndicesService indicesService; + private final ThreadPool threadPool; + + private final ScaleIndexOperationValidator validator; + private final ScaleIndexClusterStateBuilder scaleIndexClusterStateBuilder; + private final ScaleIndexShardSyncManager scaleIndexShardSyncManager; + + /** + * Constructs a new TransportSearchOnlyAction. + * + * @param transportService the transport service for network communication + * @param clusterService the cluster service for accessing cluster state + * @param threadPool the thread pool for executing operations + * @param actionFilters filters for action requests + * @param indexNameExpressionResolver resolver for index names and expressions + * @param allocationService service for shard allocation decisions + * @param indicesService service for accessing index shards + */ + @Inject + public TransportScaleIndexAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + AllocationService allocationService, + IndicesService indicesService + ) { + super( + ScaleIndexAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + ScaleIndexRequest::new, + indexNameExpressionResolver + ); + this.allocationService = allocationService; + this.indicesService = indicesService; + this.threadPool = threadPool; + this.validator = new ScaleIndexOperationValidator(); + this.scaleIndexClusterStateBuilder = new ScaleIndexClusterStateBuilder(); + this.scaleIndexShardSyncManager = new ScaleIndexShardSyncManager(clusterService, transportService, NAME); + + transportService.registerRequestHandler( + NAME, + ThreadPool.Names.SAME, + ScaleIndexNodeRequest::new, + (request, channel, task) -> handleShardSyncRequest(request, channel) + ); + } + + /** + * Returns the executor name for this transport action. + * + * @return the executor name + */ + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + /** + * Deserializes the response from stream. + * + * @param in the stream input + * @return the deserialized response + * @throws IOException if an I/O error occurs + */ + @Override + protected AcknowledgedResponse read(StreamInput in) throws IOException { + return new AcknowledgedResponse(in); + } + + /** + * Handles the search-only request on the cluster manager node. + *

+ * This method determines whether to execute a scale-up or scale-down operation + * based on the request parameters, and submits the appropriate cluster state update task. + * + * @param request the search-only scale request + * @param state the current cluster state + * @param listener the listener to notify with the operation result + */ + @Override + protected void clusterManagerOperation(ScaleIndexRequest request, ClusterState state, ActionListener listener) { + try { + String index = request.getIndex(); + if (request.isScaleDown()) { + submitScaleDownTask(index, listener); + } else { + submitScaleUpTask(index, state, listener); + } + } catch (Exception e) { + logger.error("Failed to execute cluster manager operation", e); + listener.onFailure(e); + } + } + + /** + * Submits the scale-down update task: it first adds a temporary block to the indices and then initiates shard synchronization. + */ + private void submitScaleDownTask(final String index, final ActionListener listener) { + final Map blockedIndices = new HashMap<>(); + + clusterService.submitStateUpdateTask( + "add-block-index-to-scale " + index, + new AddBlockClusterStateUpdateTask(index, blockedIndices, listener) + ); + } + + /** + * Sends shard sync requests to each node that holds a primary shard. + */ + private void proceedWithScaleDown( + String index, + Map primaryShardsNodes, + ActionListener listener + ) { + scaleIndexShardSyncManager.sendShardSyncRequests( + index, + primaryShardsNodes, + ActionListener.wrap(responses -> handleShardSyncResponses(responses, index, listener), listener::onFailure) + ); + } + + private void handleShardSyncResponses( + Collection responses, + String index, + ActionListener listener + ) { + scaleIndexShardSyncManager.validateNodeResponses( + responses, + ActionListener.wrap(searchOnlyResponse -> finalizeScaleDown(index, listener), listener::onFailure) + ); + } + + /** + * Finalizes scale-down by updating the metadata and routing table: + * removes the temporary block and adds a permanent search-only block. + */ + private void finalizeScaleDown(String index, ActionListener listener) { + clusterService.submitStateUpdateTask("finalize-scale-down", new FinalizeScaleDownTask(index, listener)); + } + + /** + * Handles an incoming shard sync request from another node. 
+ */ + void handleShardSyncRequest(ScaleIndexNodeRequest request, TransportChannel channel) { + ClusterState state = clusterService.state(); + + IndexMetadata indexMetadata = state.metadata().index(request.getIndex()); + if (indexMetadata == null) { + throw new IllegalStateException("Index " + request.getIndex() + " not found"); + } + + IndexService indexService = getIndexService(indexMetadata); + ChannelActionListener listener = new ChannelActionListener<>( + channel, + "sync_shard", + request + ); + + syncShards(indexService, request.getShardIds(), listener); + } + + private IndexService getIndexService(IndexMetadata indexMetadata) { + IndexService indexService = indicesService.indexService(indexMetadata.getIndex()); + if (indexService == null) { + throw new IllegalStateException("IndexService not found for index " + indexMetadata.getIndex().getName()); + } + return indexService; + } + + private void syncShards(IndexService indexService, List shardIds, ActionListener listener) { + + GroupedActionListener groupedActionListener = new GroupedActionListener<>(new ActionListener<>() { + @Override + public void onResponse(Collection shardResponses) { + listener.onResponse(new ScaleIndexNodeResponse(clusterService.localNode(), shardResponses.stream().toList())); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, shardIds.size()); + + for (ShardId shardId : shardIds) { + IndexShard shard = indexService.getShardOrNull(shardId.id()); + if (shard == null || shard.routingEntry().primary() == false) { + groupedActionListener.onFailure(new IllegalStateException("Attempting to scale down a replica shard")); + break; + } + threadPool.executor(SHARD_SYNC_EXECUTOR).execute(() -> { syncSingleShard(shard, groupedActionListener); }); + } + } + + void syncSingleShard(IndexShard shard, ActionListener listener) { + shard.acquireAllPrimaryOperationsPermits(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + logger.info("Performing final sync and flush for shard {}", shard.shardId()); + try { + shard.sync(); + shard.flush(new FlushRequest().force(true).waitIfOngoing(true)); + shard.waitForRemoteStoreSync(); + listener.onResponse( + new ScaleIndexShardResponse(shard.shardId(), shard.isSyncNeeded(), shard.translogStats().getUncommittedOperations()) + ); + } catch (IOException e) { + listener.onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, TimeValue.timeValueSeconds(30)); + } + + /** + * Submits the scale-up update task that rebuilds the routing table and updates index metadata. + */ + private void submitScaleUpTask( + final String index, + final ClusterState currentState, + final ActionListener listener + ) { + IndexMetadata indexMetadata = currentState.metadata().index(index); + if (!validator.validateScalePrerequisites(indexMetadata, index, listener, false)) { + return; + } + + clusterService.submitStateUpdateTask("scale-up-index", new ScaleUpClusterStateUpdateTask(index, listener)); + } + + @Override + protected ClusterBlockException checkBlock(ScaleIndexRequest request, ClusterState state) { + return state.blocks() + .indicesBlockedException( + ClusterBlockLevel.METADATA_WRITE, + indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), request.getIndex()) + ); + } + + /** + * Cluster state update task for adding a temporary block during the initial phase of scaling down. + *

+ * This task: + *

+ * <ul>
+ *   <li>Validates that the index meets prerequisites for scaling down</li>
+ *   <li>Adds a temporary write block to prevent new operations during scaling</li>
+ *   <li>Initiates shard synchronization after the block is applied</li>
+ * </ul>
+ */ + class AddBlockClusterStateUpdateTask extends ClusterStateUpdateTask { + private final String index; + private final Map blockedIndices; + private final ActionListener listener; + + AddBlockClusterStateUpdateTask( + String index, + Map blockedIndices, + ActionListener listener + ) { + super(Priority.URGENT); + this.index = index; + this.blockedIndices = blockedIndices; + this.listener = listener; + } + + @Override + public ClusterState execute(final ClusterState currentState) { + IndexMetadata indexMetadata = currentState.metadata().index(index); + try { + validator.validateScalePrerequisites(indexMetadata, index, listener, true); + return scaleIndexClusterStateBuilder.buildScaleDownState(currentState, index, blockedIndices); + } catch (Exception e) { + return currentState; + } + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (oldState == newState) { + listener.onResponse(new AcknowledgedResponse(true)); + return; + } + + IndexMetadata indexMetadata = newState.metadata().index(index); + if (indexMetadata != null) { + Map primaryShardsNodes = scaleIndexShardSyncManager.getPrimaryShardAssignments(indexMetadata, newState); + proceedWithScaleDown(index, primaryShardsNodes, listener); + } + } + + @Override + public void onFailure(String source, Exception e) { + logger.error("Failed to process cluster state update for scale down", e); + listener.onFailure(e); + } + } + + /** + * Cluster state update task for finalizing a scale-down operation. + *

+ * This task: + *

+ * <ul>
+ *   <li>Removes the temporary scale-down preparation block</li>
+ *   <li>Updates index metadata to mark it as search-only</li>
+ *   <li>Applies a permanent search-only block</li>
+ *   <li>Updates the routing table to remove non-search-only shards</li>
+ * </ul>
+ */ + class FinalizeScaleDownTask extends ClusterStateUpdateTask { + private final String index; + private final ActionListener listener; + + FinalizeScaleDownTask(String index, ActionListener listener) { + super(Priority.URGENT); + this.index = index; + this.listener = listener; + } + + @Override + public ClusterState execute(ClusterState currentState) { + return scaleIndexClusterStateBuilder.buildFinalScaleDownState(currentState, index); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(new AcknowledgedResponse(true)); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error("Failed to finalize scale-down operation", e); + listener.onFailure(e); + } + } + + /** + * Cluster state update task for scaling up an index from search-only mode to normal operation. + *

+ * This task: + *

+ * <ul>
+ *   <li>Rebuilds the routing table to add primary and replica shards</li>
+ *   <li>Removes the search-only block</li>
+ *   <li>Updates index settings to disable search-only mode</li>
+ *   <li>Triggers routing table updates to allocate the new shards</li>
+ * </ul>
+ */ + private class ScaleUpClusterStateUpdateTask extends ClusterStateUpdateTask { + private final String index; + private final ActionListener listener; + + ScaleUpClusterStateUpdateTask(String index, ActionListener listener) { + this.index = index; + this.listener = listener; + } + + @Override + public ClusterState execute(ClusterState currentState) { + RoutingTable newRoutingTable = scaleIndexClusterStateBuilder.buildScaleUpRoutingTable(currentState, index); + ClusterState tempState = ClusterState.builder(currentState).routingTable(newRoutingTable).build(); + + ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder().blocks(tempState.blocks()); + Metadata.Builder metadataBuilder = Metadata.builder(tempState.metadata()); + + blocksBuilder.removeIndexBlockWithId(index, INDEX_SEARCH_ONLY_BLOCK_ID); + IndexMetadata indexMetadata = tempState.metadata().index(index); + Settings updatedSettings = Settings.builder() + .put(indexMetadata.getSettings()) + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) + .build(); + metadataBuilder.put( + IndexMetadata.builder(indexMetadata).settings(updatedSettings).settingsVersion(indexMetadata.getSettingsVersion() + 1) + ); + + return allocationService.reroute( + ClusterState.builder(tempState).blocks(blocksBuilder).metadata(metadataBuilder).build(), + "restore indexing shards" + ); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(new AcknowledgedResponse(true)); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error("Failed to execute cluster state update for scale up", e); + listener.onFailure(e); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/package-info.java new file mode 100644 index 0000000000000..cd1d28c0d798d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/scale/searchonly/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Index Scaling transport handlers for managing search-only mode. 
*/ +package org.opensearch.action.admin.indices.scale.searchonly; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 1a3c657f5b1b8..fad504a476511 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -146,7 +146,7 @@ protected void clusterManagerOperation( final String customDataPath = IndexMetadata.INDEX_DATA_PATH_SETTING.get(state.metadata().index(index).getSettings()); for (IndexShardRoutingTable routing : indexShardRoutingTables) { final int shardId = routing.shardId().id(); - ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing); + ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing, state.metadata().index(index)); if (request.shardStatuses().contains(shardHealth.getStatus())) { shardsToFetch.add(Tuple.tuple(routing.shardId(), customDataPath)); } diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java index 5d3bf94aedb19..2164fc48e4ba5 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java @@ -51,4 +51,5 @@ public enum ClusterBlockLevel { public static final EnumSet ALL = EnumSet.allOf(ClusterBlockLevel.class); public static final EnumSet READ_WRITE = EnumSet.of(READ, WRITE); + public static final EnumSet WRITE_BLOCK = EnumSet.of(WRITE); } diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index c894fa5dce714..4e119418f6917 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -432,6 +432,9 @@ public Builder addBlocks(IndexMetadata indexMetadata) { if (indexMetadata.isRemoteSnapshot()) { addIndexBlock(indexName, IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE); } + if (IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.get(indexMetadata.getSettings())) { + addIndexBlock(indexName, IndexMetadata.APIBlock.SEARCH_ONLY.getBlock()); + } return this; } diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java index 77d96cb0af792..fae586e1adf03 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java @@ -156,7 +156,7 @@ public ClusterIndexHealth(final IndexMetadata indexMetadata, final IndexRoutingT shards = new HashMap<>(); for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) { int shardId = shardRoutingTable.shardId().id(); - shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable)); + shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable, indexMetadata)); } // update the index status @@ -212,11 +212,13 @@ public ClusterIndexHealth( int computeUnassignedShards = 0; int computeDelayedUnassignedShards = 0; + boolean isSearchOnlyClusterBlockEnabled = indexMetadata.getSettings() + 
.getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false); boolean isShardLevelHealthRequired = healthLevel == ClusterHealthRequest.Level.SHARDS; if (isShardLevelHealthRequired) { for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { int shardId = indexShardRoutingTable.shardId().id(); - ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, indexShardRoutingTable); + ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, indexShardRoutingTable, indexMetadata); if (shardHealth.isPrimaryActive()) { computeActivePrimaryShards++; } @@ -252,15 +254,25 @@ public ClusterIndexHealth( } } ShardRouting primaryShard = indexShardRoutingTable.primaryShard(); - if (primaryShard.active()) { - computeActivePrimaryShards++; + + if (primaryShard == null) { + if (isSearchOnlyClusterBlockEnabled) { + computeStatus = getIndexHealthStatus(ClusterHealthStatus.GREEN, computeStatus); + } else { + computeStatus = getIndexHealthStatus(ClusterHealthStatus.RED, computeStatus); + } + } else { + if (primaryShard.active()) { + computeActivePrimaryShards++; + } + ClusterHealthStatus shardHealth = ClusterShardHealth.getShardHealth( + primaryShard, + activeShardsPerShardId, + shardRoutingCountPerShardId, + indexMetadata + ); + computeStatus = getIndexHealthStatus(shardHealth, computeStatus); } - ClusterHealthStatus shardHealth = ClusterShardHealth.getShardHealth( - primaryShard, - activeShardsPerShardId, - shardRoutingCountPerShardId - ); - computeStatus = getIndexHealthStatus(shardHealth, computeStatus); } } diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java index ace4537a5e291..15f255d78ae0d 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.health; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; @@ -113,7 +114,11 @@ public final class ClusterShardHealth implements Writeable, ToXContentFragment { private int delayedUnassignedShards; private final boolean primaryActive; - public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable) { + public ClusterShardHealth( + final int shardId, + final IndexShardRoutingTable shardRoutingTable, + final boolean isSearchOnlyClusterBlockEnabled + ) { this.shardId = shardId; int computeActiveShards = 0; int computeRelocatingShards = 0; @@ -126,7 +131,6 @@ public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardR if (shardRouting.active()) { computeActiveShards++; if (shardRouting.relocating()) { - // the shard is relocating, the one it is relocating to will be in initializing state, so we don't count it computeRelocatingShards++; } } else if (shardRouting.initializing()) { @@ -139,13 +143,22 @@ public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardR } } final ShardRouting primaryRouting = shardRoutingTable.primaryShard(); - this.status = getShardHealth(primaryRouting, computeActiveShards, shardRoutingTable.size()); + this.status = getShardHealth(primaryRouting, computeActiveShards, shardRoutingTable.size(), isSearchOnlyClusterBlockEnabled); this.activeShards = computeActiveShards; this.relocatingShards = 
computeRelocatingShards; this.initializingShards = computeInitializingShards; this.unassignedShards = computeUnassignedShards; this.delayedUnassignedShards = computeDelayedUnassignedShards; - this.primaryActive = primaryRouting.active(); + this.primaryActive = primaryRouting != null && primaryRouting.active(); + } + + // Original constructor can call the new one + public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable, final IndexMetadata indexMetadata) { + this( + shardId, + shardRoutingTable, + indexMetadata.getSettings().getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) + ); } public ClusterShardHealth(final StreamInput in) throws IOException { @@ -229,9 +242,40 @@ public void writeTo(final StreamOutput out) throws IOException { * Shard health is YELLOW when primary shard is active but at-least one replica shard is inactive. * Shard health is RED when the primary is not active. *

+ * <p>
+ * In search-only mode (when {@code isSearchOnlyClusterBlockEnabled} is {@code true}): + *

+ * <ul>
+ *   <li>Shard health is GREEN when all expected search replicas are active</li>
+ *   <li>Shard health is YELLOW when some (but not all) search replicas are active</li>
+ *   <li>Shard health is RED when no search replicas are active</li>
+ * </ul>
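+ * <p>
+ * For illustration (argument values are arbitrary), with no primary routing entry and two expected
+ * shard copies:
+ * <pre>{@code
+ * getShardHealth(null, 2, 2, true);  // GREEN: all search replicas active
+ * getShardHealth(null, 1, 2, true);  // YELLOW: only some search replicas active
+ * getShardHealth(null, 0, 2, true);  // RED: no search replicas active
+ * getShardHealth(null, 2, 2, false); // RED: missing primary outside search-only mode
+ * }</pre>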
+ * + * @param primaryRouting the routing entry for the primary shard, may be null + * @param activeShards the number of active shards (primary and replicas) + * @param totalShards the total number of shards (primary and replicas) + * @param isSearchOnlyClusterBlockEnabled whether the index is in search-only mode + * @return the health status for the shard */ - public static ClusterHealthStatus getShardHealth(final ShardRouting primaryRouting, final int activeShards, final int totalShards) { - assert primaryRouting != null : "Primary shard routing can't be null"; + public static ClusterHealthStatus getShardHealth( + final ShardRouting primaryRouting, + final int activeShards, + final int totalShards, + final boolean isSearchOnlyClusterBlockEnabled + ) { + + if (primaryRouting == null) { + if (isSearchOnlyClusterBlockEnabled) { + if (activeShards == 0) { + return ClusterHealthStatus.RED; + } else { + return (activeShards < totalShards) ? ClusterHealthStatus.YELLOW : ClusterHealthStatus.GREEN; + } + } else { + return ClusterHealthStatus.RED; + } + } + if (primaryRouting.active()) { if (activeShards == totalShards) { return ClusterHealthStatus.GREEN; @@ -243,6 +287,35 @@ public static ClusterHealthStatus getShardHealth(final ShardRouting primaryRouti } } + /** + * Computes the shard health of an index. + *

+ * Shard health is GREEN when all primary and replica shards of the indices are active. + * Shard health is YELLOW when primary shard is active but at-least one replica shard is inactive. + * Shard health is RED when the primary is not active. + *

+ * <p>
+ * In search-only mode (when {@link IndexMetadata#INDEX_BLOCKS_SEARCH_ONLY_SETTING} is enabled): + *

+ * <ul>
+ *   <li>Shard health is GREEN when all expected search replicas are active</li>
+ *   <li>Shard health is YELLOW when some (but not all) search replicas are active</li>
+ *   <li>Shard health is RED when no search replicas are active</li>
+ * </ul>
+ */ + public static ClusterHealthStatus getShardHealth( + final ShardRouting primaryRouting, + final int activeShards, + final int totalShards, + final IndexMetadata indexMetadata + ) { + + boolean isSearchOnlyClusterBlockEnabled = indexMetadata.getSettings() + .getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false); + + return getShardHealth(primaryRouting, activeShards, totalShards, isSearchOnlyClusterBlockEnabled); + } + /** * Checks if an inactive primary shard should cause the cluster health to go RED. *

diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java index 5eeac822e7c3e..ea09555d9d537 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java @@ -93,6 +93,7 @@ public ClusterStateHealth(final ClusterState clusterState, final String[] concre numberOfDataNodes = clusterState.nodes().getDataNodes().size(); hasDiscoveredClusterManager = clusterState.nodes().getClusterManagerNodeId() != null; indices = new HashMap<>(); + for (String index : concreteIndices) { IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(index); IndexMetadata indexMetadata = clusterState.metadata().index(index); @@ -101,7 +102,6 @@ public ClusterStateHealth(final ClusterState clusterState, final String[] concre } ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetadata, indexRoutingTable); - indices.put(indexHealth.getIndex(), indexHealth); } @@ -123,6 +123,21 @@ public ClusterStateHealth(final ClusterState clusterState, final String[] concre computeStatus = getClusterHealthStatus(indexHealth, computeStatus); } + Map searchOnlyIndices = collectSearchOnlyIndices(clusterState, concreteIndices, indices); + + if (searchOnlyIndices.isEmpty() == false) { + for (ClusterIndexHealth indexHealth : searchOnlyIndices.values()) { + if (indexHealth.getStatus() == ClusterHealthStatus.RED) { + computeStatus = ClusterHealthStatus.RED; + break; + } + if (indexHealth.getUnassignedShards() > 0 && indexHealth.getActiveShards() == 0) { + computeStatus = ClusterHealthStatus.RED; + break; + } + } + } + if (clusterState.blocks().hasGlobalBlockWithStatus(RestStatus.SERVICE_UNAVAILABLE)) { computeStatus = ClusterHealthStatus.RED; } @@ -177,7 +192,12 @@ public ClusterStateHealth( continue; } - ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetadata, indexRoutingTable, healthLevel); + ClusterHealthRequest.Level indexHealthLevel = healthLevel; + if (healthLevel == ClusterHealthRequest.Level.CLUSTER && isSearchOnlyClusterBlockEnabled(indexMetadata)) { + indexHealthLevel = ClusterHealthRequest.Level.SHARDS; + } + + ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetadata, indexRoutingTable, indexHealthLevel); computeActivePrimaryShards += indexHealth.getActivePrimaryShards(); computeActiveShards += indexHealth.getActiveShards(); computeRelocatingShards += indexHealth.getRelocatingShards(); @@ -186,12 +206,25 @@ public ClusterStateHealth( computeDelayedUnassignedShards += indexHealth.getDelayedUnassignedShards(); computeStatus = getClusterHealthStatus(indexHealth, computeStatus); - if (isIndexOrShardLevelHealthRequired) { - // Store ClusterIndexHealth only when the health is requested at Index or Shard level + if (isIndexOrShardLevelHealthRequired + || (isSearchOnlyClusterBlockEnabled(indexMetadata) && indexHealthLevel == ClusterHealthRequest.Level.SHARDS)) { + // Store ClusterIndexHealth when: + // 1. Health is requested at Index or Shard level, OR + // 2. 
This is a search_only index we're examining at SHARDS level indices.put(indexHealth.getIndex(), indexHealth); } } + Map searchOnlyIndices = collectSearchOnlyIndices(clusterState, concreteIndices, indices); + if (searchOnlyIndices.isEmpty() == false) { + for (ClusterIndexHealth indexHealth : searchOnlyIndices.values()) { + if (indexHealth.getUnassignedShards() > 0 && indexHealth.getActiveShards() == 0) { + computeStatus = ClusterHealthStatus.RED; + break; + } + } + } + if (clusterState.blocks().hasGlobalBlockWithStatus(RestStatus.SERVICE_UNAVAILABLE)) { computeStatus = ClusterHealthStatus.RED; } @@ -219,6 +252,46 @@ public ClusterStateHealth( } } + /** + * Checks if an index has search-only mode enabled. + * + * @param indexMetadata The index metadata + * @return true if search-only mode is enabled, false otherwise + */ + private static boolean isSearchOnlyClusterBlockEnabled(IndexMetadata indexMetadata) { + return indexMetadata.getSettings().getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false); + } + + /** + * Collects health information for search-only indices. + * + * @param clusterState The current cluster state + * @param concreteIndices Array of index names + * @param healthIndices Map of existing index health objects + * @return Map of index health objects for search-only indices + */ + private static Map collectSearchOnlyIndices( + final ClusterState clusterState, + final String[] concreteIndices, + final Map healthIndices + ) { + + Map searchOnlyIndices = new HashMap<>(); + for (String index : concreteIndices) { + IndexMetadata indexMetadata = clusterState.metadata().index(index); + if (indexMetadata == null) continue; + + if (isSearchOnlyClusterBlockEnabled(indexMetadata)) { + String indexName = indexMetadata.getIndex().getName(); + ClusterIndexHealth indexHealth = healthIndices.get(indexName); + if (indexHealth != null) { + searchOnlyIndices.put(indexName, indexHealth); + } + } + } + return searchOnlyIndices; + } + private static ClusterHealthStatus getClusterHealthStatus(ClusterIndexHealth indexHealth, ClusterHealthStatus computeStatus) { switch (indexHealth.getStatus()) { case RED: diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 9005c830167f9..8782e06e68419 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -166,6 +166,24 @@ public class IndexMetadata implements Diffable, ToXContentFragmen EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.WRITE) ); + // Block ID and block for scale operations (IDs 20-29 reserved for scaling) + public static final int INDEX_SEARCH_ONLY_BLOCK_ID = 20; + + /** + * Permanent cluster block applied to indices in search-only mode. + *

+ * This block prevents write operations to the index while allowing read operations. + */ + public static final ClusterBlock INDEX_SEARCH_ONLY_BLOCK = new ClusterBlock( + INDEX_SEARCH_ONLY_BLOCK_ID, + "index scaled down", + false, + false, + false, + RestStatus.FORBIDDEN, + EnumSet.of(ClusterBlockLevel.WRITE) + ); + /** * The state of the index. * @@ -507,7 +525,8 @@ public enum APIBlock implements Writeable { READ("read", INDEX_READ_BLOCK), WRITE("write", INDEX_WRITE_BLOCK), METADATA("metadata", INDEX_METADATA_BLOCK), - READ_ONLY_ALLOW_DELETE("read_only_allow_delete", INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); + READ_ONLY_ALLOW_DELETE("read_only_allow_delete", INDEX_READ_ONLY_ALLOW_DELETE_BLOCK), + SEARCH_ONLY("search_only", INDEX_SEARCH_ONLY_BLOCK); final String name; final String settingName; @@ -576,6 +595,8 @@ public static APIBlock readFrom(StreamInput input) throws IOException { public static final String SETTING_READ_ONLY_ALLOW_DELETE = APIBlock.READ_ONLY_ALLOW_DELETE.settingName(); public static final Setting INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING = APIBlock.READ_ONLY_ALLOW_DELETE.setting(); + public static final Setting INDEX_BLOCKS_SEARCH_ONLY_SETTING = APIBlock.SEARCH_ONLY.setting(); + public static final String SETTING_VERSION_CREATED = "index.version.created"; public static final Setting SETTING_INDEX_VERSION_CREATED = Setting.versionSetting( diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index 08574dddc007c..d1bb689672cca 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -122,11 +122,11 @@ public Index getIndex() { boolean validate(Metadata metadata) { // check index exists if (!metadata.hasIndex(index.getName())) { - throw new IllegalStateException(index + " exists in routing does not exists in metadata"); + throw new IllegalStateException(index + " exists in routing but does not exist in metadata"); } IndexMetadata indexMetadata = metadata.index(index.getName()); if (indexMetadata.getIndexUUID().equals(index.getUUID()) == false) { - throw new IllegalStateException(index.getName() + " exists in routing does not exists in metadata with the same uuid"); + throw new IllegalStateException(index.getName() + " exists in routing but does not exist in metadata with the same uuid"); } // check the number of shards @@ -141,27 +141,33 @@ boolean validate(Metadata metadata) { throw new IllegalStateException("Wrong number of shards in routing table, missing: " + expected); } - // check the replicas + boolean isSearchOnlyClusterBlockEnabled = indexMetadata.getSettings() + .getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false); + for (IndexShardRoutingTable indexShardRoutingTable : this) { int routingNumberOfReplicas = indexShardRoutingTable.size() - 1; - if (routingNumberOfReplicas != indexMetadata.getNumberOfReplicas() + indexMetadata.getNumberOfSearchOnlyReplicas()) { + int expectedReplicas = indexMetadata.getNumberOfReplicas() + indexMetadata.getNumberOfSearchOnlyReplicas(); + + // Only throw if we are NOT in search-only mode. Otherwise, we ignore or log the mismatch. 
+ if (routingNumberOfReplicas != expectedReplicas && isSearchOnlyClusterBlockEnabled == false) { throw new IllegalStateException( "Shard [" + indexShardRoutingTable.shardId().id() + "] routing table has wrong number of replicas, expected [" - + "Replicas: " + + "Replicas: " + indexMetadata.getNumberOfReplicas() - + "Search Replicas: " + + ", Search Replicas: " + indexMetadata.getNumberOfSearchOnlyReplicas() + "], got [" + routingNumberOfReplicas + "]" ); } + for (ShardRouting shardRouting : indexShardRoutingTable) { if (!shardRouting.index().equals(index)) { throw new IllegalStateException( - "shard routing has an index [" + shardRouting.index() + "] that is different " + "from the routing table" + "shard routing has an index [" + shardRouting.index() + "] that is different from the routing table" ); } final Set inSyncAllocationIds = indexMetadata.inSyncAllocationIds(shardRouting.id()); @@ -185,8 +191,8 @@ boolean validate(Metadata metadata) { throw new IllegalStateException( "a primary shard routing " + shardRouting - + " is a primary that is recovering from a stale primary has unexpected allocation ids in in-sync " - + "allocation set " + + " is a primary that is recovering " + + "from a stale primary but has unexpected allocation ids in the in-sync set " + inSyncAllocationIds ); } @@ -194,8 +200,8 @@ boolean validate(Metadata metadata) { throw new IllegalStateException( "a primary shard routing " + shardRouting - + " is a primary that is recovering from a known allocation id but has no corresponding entry in the in-sync " - + "allocation set " + + " is a primary that is recovering " + + "from a known allocation id but has no corresponding entry in the in-sync set " + inSyncAllocationIds ); } @@ -425,6 +431,7 @@ public Builder initializeAsNew(IndexMetadata indexMetadata) { /** * Initializes an existing index. */ + public Builder initializeAsRecovery(IndexMetadata indexMetadata) { return initializeEmpty(indexMetadata, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); } @@ -602,6 +609,9 @@ private Builder initializeAsRestore( return this; } + /** + * Initializes a new empty index, with an option to control if its from an API or not. + */ /** * Initializes a new empty index, with an option to control if its from an API or not. 
*/ @@ -610,6 +620,34 @@ private Builder initializeEmpty(IndexMetadata indexMetadata, UnassignedInfo unas if (!shards.isEmpty()) { throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); } + + // Check if search-only mode is enabled + boolean isSearchOnly = indexMetadata.getSettings().getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false); + + // For search-only mode, only initialize search replicas + if (isSearchOnly) { + for (int shardNumber = 0; shardNumber < indexMetadata.getNumberOfShards(); shardNumber++) { + ShardId shardId = new ShardId(index, shardNumber); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + + // Add only search replicas + for (int i = 0; i < indexMetadata.getNumberOfSearchOnlyReplicas(); i++) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned( + shardId, + false, + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + unassignedInfo + ) + ); + } + shards.put(shardNumber, indexShardRoutingBuilder.build()); + } + return this; + } + + // Standard initialization for non-search-only mode for (int shardNumber = 0; shardNumber < indexMetadata.getNumberOfShards(); shardNumber++) { ShardId shardId = new ShardId(index, shardNumber); final RecoverySource primaryRecoverySource; diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index 6db70cc5f4fc5..4d2232afd9a5e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -128,7 +128,15 @@ public RoutingNodes(ClusterState clusterState, boolean readOnly) { // also fill replicaSet information for (final IndexRoutingTable indexRoutingTable : routingTable.indicesRouting().values()) { for (IndexShardRoutingTable indexShard : indexRoutingTable) { - assert indexShard.primary != null; + IndexMetadata idxMetadata = metadata.index(indexShard.shardId().getIndex()); + boolean isSearchOnlyClusterBlockEnabled = false; + if (idxMetadata != null) { + isSearchOnlyClusterBlockEnabled = idxMetadata.getSettings() + .getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false); + } + if (isSearchOnlyClusterBlockEnabled == false) { + assert indexShard.primary != null : "Primary shard routing can't be null for non-search-only indices"; + } for (ShardRouting shard : indexShard) { // to get all the shards belonging to an index, including the replicas, // we define a replica set and keep track of it. A replica set is identified @@ -184,8 +192,18 @@ private void updateRecoveryCounts(final ShardRouting routing, final boolean incr final int howMany = increment ? 
1 : -1; assert routing.initializing() : "routing must be initializing: " + routing; + + IndexMetadata idxMetadata = metadata.index(routing.index()); + boolean isSearchOnlyClusterBlockEnabled = false; + if (idxMetadata != null) { + isSearchOnlyClusterBlockEnabled = idxMetadata.getSettings() + .getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false); + } + // TODO: check primary == null || primary.active() after all tests properly add ReplicaAfterPrimaryActiveAllocationDecider - assert primary == null || primary.assignedToNode() : "shard is initializing but its primary is not assigned to a node"; + if (isSearchOnlyClusterBlockEnabled == false) { + assert primary == null || primary.assignedToNode() : "shard is initializing but its primary is not assigned to a node"; + } // Primary shard routing, excluding the relocating primaries. if (routing.primary() && (primary == null || primary == routing)) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java index 52b89f5c403e8..c516b247ee0b0 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.routing.allocation.decider; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.RoutingAllocation; @@ -57,8 +58,17 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocat } ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting.shardId()); if (primary == null) { - return allocation.decision(Decision.NO, NAME, "primary shard for this replica is not yet active"); + boolean isSearchOnlyClusterBlockEnabled = allocation.metadata() + .getIndexSafe(shardRouting.index()) + .getSettings() + .getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false); + if (shardRouting.isSearchOnly() && isSearchOnlyClusterBlockEnabled) { + return allocation.decision(Decision.YES, NAME, "search only: both shard and index are marked search-only"); + } else { + return allocation.decision(Decision.NO, NAME, "primary shard for this replica is not yet active"); + } } + return allocation.decision(Decision.YES, NAME, "primary shard for this replica is already active"); } } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 3793b9b09e3b2..5353ad9776522 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -109,6 +109,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetadata.INDEX_BLOCKS_METADATA_SETTING, IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING, IndexMetadata.INDEX_PRIORITY_SETTING, + IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING, IndexMetadata.INDEX_DATA_PATH_SETTING, IndexMetadata.INDEX_FORMAT_SETTING, IndexMetadata.INDEX_HIDDEN_SETTING, diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java 
b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index c30ee8479ac97..2844a378df571 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -55,6 +55,7 @@ import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -248,14 +249,30 @@ protected AllocateUnassignedDecision getAllocationDecision( final RoutingNodes routingNodes = allocation.routingNodes(); final boolean explain = allocation.debugDecision(); ShardRouting primaryShard = routingNodes.activePrimary(unassignedShard.shardId()); + if (primaryShard == null) { - assert explain : "primary should only be null here if we are in explain mode, so we didn't " - + "exit early when canBeAllocatedToAtLeastOneNode didn't return a YES decision"; - return AllocateUnassignedDecision.no( - UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.v1().type()), - new ArrayList<>(allocationDecision.v2().values()) - ); + // Determine if the index is configured for search-only. + + if (unassignedShard.isSearchOnly()) { + boolean isSearchOnlyClusterBlockEnabled = allocation.metadata() + .getIndexSafe(unassignedShard.index()) + .getSettings() + .getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false); + + if (isSearchOnlyClusterBlockEnabled) { + return getSearchReplicaAllocationDecision(unassignedShard, allocation); + } + } else { + // For non-search-only replicas, if there is no active primary we do not attempt an allocation. + assert explain : "primary should only be null here if we are in explain mode, so we didn't " + + "exit early when canBeAllocatedToAtLeastOneNode didn't return a YES decision"; + return AllocateUnassignedDecision.no( + UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.v1().type()), + new ArrayList<>(allocationDecision.v2().values()) + ); + } } + assert primaryShard.currentNodeId() != null; final DiscoveryNode primaryNode = allocation.nodes().get(primaryShard.currentNodeId()); final StoreFilesMetadata primaryStore = findStore(primaryNode, nodeShardStores); @@ -331,6 +348,33 @@ protected AllocateUnassignedDecision getAllocationDecision( return AllocateUnassignedDecision.NOT_TAKEN; } + /** + * Handles allocation decisions for search-only replica shards + */ + private AllocateUnassignedDecision getSearchReplicaAllocationDecision(ShardRouting unassignedShard, RoutingAllocation allocation) { + // Obtain the collection of data nodes once + Collection<DiscoveryNode> dataNodes = allocation.nodes().getDataNodes().values(); + + // Use a stream to find the first candidate node where the allocation decider returns YES + DiscoveryNode selectedCandidate = dataNodes.stream() + .filter(candidate -> allocation.routingNodes().node(candidate.getId()) != null) + .filter(candidate -> { + RoutingNode node = allocation.routingNodes().node(candidate.getId()); + Decision decision = allocation.deciders().canAllocate(unassignedShard, node, allocation); + return decision.type() == Decision.Type.YES; + }) + .findFirst() + .orElse(null); + + // If a candidate was found, return a YES allocation decision + if (selectedCandidate != null) { + return AllocateUnassignedDecision.yes(selectedCandidate, null, new ArrayList<>(), false); + } + + // No candidate node accepted the shard; delay allocation so it can be retried later + return 
AllocateUnassignedDecision.delayed(0L, 0L, null); + } + /** * Determines if the shard can be allocated on at least one node based on the allocation deciders. *

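The allocator changes above let a search-only replica be assigned even when no active primary exists, provided the index carries the search-only block setting. A minimal usage sketch of how a caller could drive that transition through the prepareScaleSearchOnly client method added later in this patch; the index name "logs", the Client wiring, and the print statements are illustrative assumptions, not part of the change:

import org.opensearch.core.action.ActionListener;
import org.opensearch.transport.client.Client;

final class ScaleToSearchOnlyExample {
    // Sketch only: scales the hypothetical index "logs" down to search-only mode,
    // assuming "client" is an already-connected OpenSearch client.
    static void scaleDown(Client client) {
        client.admin()
            .indices()
            .prepareScaleSearchOnly("logs", true) // pass false to scale back up
            .execute(ActionListener.wrap(
                response -> System.out.println("index scaled to search-only"),
                e -> System.err.println("scale failed: " + e.getMessage())
            ));
    }
}

The REST equivalent added by RestScaleIndexAction further down is POST /<index>/_scale with a body of {"search_only": true}.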
diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index 73a79a54ca588..9ddb84f58543e 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -49,7 +49,6 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.common.util.IndexUtils.filterIndices; import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; @@ -145,14 +144,13 @@ public RemoteRestoreResult restore( boolean metadataFromRemoteStore = (restoreClusterUUID == null || restoreClusterUUID.isEmpty() || restoreClusterUUID.isBlank()) == false; + if (metadataFromRemoteStore) { try { - // Restore with current cluster UUID will fail as same indices would be present in the cluster which we are trying to - // restore if (currentState.metadata().clusterUUID().equals(restoreClusterUUID)) { - throw new IllegalArgumentException("clusterUUID to restore from should be different from current cluster UUID"); + throw new IllegalArgumentException("Cluster UUID for restore must be different from the current cluster UUID."); } - logger.info("Restoring cluster state from remote store from cluster UUID : [{}]", restoreClusterUUID); + logger.info("Restoring cluster state from remote store for cluster UUID: [{}]", restoreClusterUUID); remoteState = remoteClusterStateService.getLatestClusterState( currentState.getClusterName().value(), restoreClusterUUID, @@ -170,13 +168,21 @@ public RemoteRestoreResult restore( indexNames, IndicesOptions.fromOptions(true, true, true, true) ); + for (String indexName : filteredIndices) { IndexMetadata indexMetadata = currentState.metadata().index(indexName); if (indexMetadata == null) { logger.warn("Index restore is not supported for non-existent index. Skipping: {}", indexName); - } else if (indexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false) == false) { - logger.warn("Remote store is not enabled for index: {}", indexName); - } else if (restoreAllShards && IndexMetadata.State.CLOSE.equals(indexMetadata.getState()) == false) { + continue; + } + boolean isSearchOnlyClusterBlockEnabled = indexMetadata.getSettings() + .getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false); + if (isSearchOnlyClusterBlockEnabled) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "Cannot use _remotestore/_restore on search_only mode enabled index [%s].", indexName) + ); + } + if (restoreAllShards && IndexMetadata.State.CLOSE.equals(indexMetadata.getState()) == false) { throw new IllegalStateException( String.format( Locale.ROOT, @@ -184,9 +190,8 @@ public RemoteRestoreResult restore( indexName ) + " Close the existing index." 
); - } else { - indexMetadataMap.put(indexName, new Tuple<>(false, indexMetadata)); } + indexMetadataMap.put(indexName, new Tuple<>(false, indexMetadata)); } } return executeRestore(currentState, indexMetadataMap, restoreAllShards, remoteState); diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index 30d9c362b6269..bb79fab32cf9f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -67,13 +67,32 @@ public void getCheckpointMetadata( try (final GatedCloseable segmentInfosSnapshot = indexShard.getSegmentInfosSnapshot()) { final Version version = segmentInfosSnapshot.get().getCommitLuceneVersion(); final RemoteSegmentMetadata mdFile = getRemoteSegmentMetadata(); - // During initial recovery flow, the remote store might not - // have metadata as primary hasn't uploaded anything yet. - if (mdFile == null && indexShard.state().equals(IndexShardState.STARTED) == false) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, Collections.emptyMap(), null)); - return; + + // Handle null metadata file case + if (mdFile == null) { + // During initial recovery flow, the remote store might not + // have metadata as primary hasn't uploaded anything yet. + if (indexShard.state().equals(IndexShardState.STARTED) == false) { + // Non-started shard during recovery + listener.onResponse(new CheckpointInfoResponse(checkpoint, Collections.emptyMap(), null)); + return; + } else if (indexShard.routingEntry().isSearchOnly()) { + // Allow search-only replicas to become active without metadata + logger.debug("Search-only replica proceeding without remote metadata: {}", indexShard.shardId()); + listener.onResponse( + new CheckpointInfoResponse(indexShard.getLatestReplicationCheckpoint(), Collections.emptyMap(), null) + ); + return; + } else { + // Regular replicas should not be active without metadata + listener.onFailure( + new IllegalStateException("Remote metadata file can't be null if shard is active: " + indexShard.shardId()) + ); + return; + } } - assert mdFile != null : "Remote metadata file can't be null if shard is active " + indexShard.state(); + + // Process metadata when it exists metadataMap = mdFile.getMetadata() .entrySet() .stream() diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java index b8a5774c21c1f..c35898cfffe1e 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java @@ -345,7 +345,7 @@ SegmentReplicationTarget get(ShardId shardId) { } ReplicationCheckpoint getPrimaryCheckpoint(ShardId shardId) { - return primaryCheckpoint.getOrDefault(shardId, ReplicationCheckpoint.empty(shardId)); + return primaryCheckpoint.get(shardId); } ReplicationCollection.ReplicationRef get(long id) { diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestScaleIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestScaleIndexAction.java new file mode 100644 index 0000000000000..c36b3c16198c9 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestScaleIndexAction.java @@ -0,0 +1,88 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.indices; + +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.transport.client.node.NodeClient; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Rest action for scaling index operations + * + * @opensearch.internal + */ +public class RestScaleIndexAction extends BaseRestHandler { + + private static final String SEARCH_ONLY_FIELD = "search_only"; + + @Override + public List routes() { + return unmodifiableList(asList(new Route(POST, "/{index}/_scale"))); + } + + @Override + public String getName() { + return "search_only_index_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) throws IOException { + String index = request.param("index"); + if (index == null || index.trim().isEmpty()) { + throw new IllegalArgumentException("index is required"); + } + + // Parse the request body first to get the scale down value + final boolean searchOnly = parseSearchOnlyValue(request); + + // Then use the final value in the lambda + return channel -> client.admin().indices().prepareScaleSearchOnly(index, searchOnly).execute(new RestToXContentListener<>(channel)); + } + + /** + * Parses and validates the search_only parameter from the request body. + */ + private boolean parseSearchOnlyValue(RestRequest request) { + try { + Map source; + try { + source = request.contentParser().map(); + } catch (Exception e) { + throw new IllegalArgumentException("Request body must be valid JSON", e); + } + for (String key : source.keySet()) { + if (SEARCH_ONLY_FIELD.equals(key) == false) { + throw new IllegalArgumentException("Unknown parameter [" + key + "]. 
Only [" + SEARCH_ONLY_FIELD + "] is allowed."); + } + } + if (source.containsKey(SEARCH_ONLY_FIELD) == false) { + throw new IllegalArgumentException("Parameter [" + SEARCH_ONLY_FIELD + "] is required"); + } + Object value = source.get(SEARCH_ONLY_FIELD); + if ((value instanceof Boolean) == false) { + throw new IllegalArgumentException("Parameter [" + SEARCH_ONLY_FIELD + "] must be a boolean (true or false)"); + } + return (Boolean) value; + } catch (Exception e) { + if (e instanceof IllegalArgumentException) { + throw e; + } + throw new IllegalArgumentException("Request body must be valid JSON", e); + } + } +} diff --git a/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java index 2beec71785d91..6b8d168ecbbda 100644 --- a/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java @@ -92,6 +92,7 @@ import org.opensearch.action.admin.indices.rollover.RolloverRequest; import org.opensearch.action.admin.indices.rollover.RolloverRequestBuilder; import org.opensearch.action.admin.indices.rollover.RolloverResponse; +import org.opensearch.action.admin.indices.scale.searchonly.ScaleIndexRequestBuilder; import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder; @@ -889,4 +890,13 @@ public interface IndicesAdminClient extends OpenSearchClient { /** Get ingestion state */ void getIngestionState(GetIngestionStateRequest request, ActionListener listener); + + /** + * Prepares a request to scale an index between normal and search-only modes. 
+ * + * @param index The name of the index to scale + * @param searchOnly Whether to scale to search-only mode (true) or back to normal mode (false) + * @return The request builder configured with the specified scaling direction + */ + ScaleIndexRequestBuilder prepareScaleSearchOnly(String index, boolean searchOnly); } diff --git a/server/src/main/java/org/opensearch/transport/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/transport/client/support/AbstractClient.java index 317613fd2a86e..9c408a82402b5 100644 --- a/server/src/main/java/org/opensearch/transport/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/transport/client/support/AbstractClient.java @@ -268,6 +268,7 @@ import org.opensearch.action.admin.indices.rollover.RolloverRequest; import org.opensearch.action.admin.indices.rollover.RolloverRequestBuilder; import org.opensearch.action.admin.indices.rollover.RolloverResponse; +import org.opensearch.action.admin.indices.scale.searchonly.ScaleIndexRequestBuilder; import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; @@ -2150,6 +2151,10 @@ public void updateView(CreateViewAction.Request request, ActionListener updateView(CreateViewAction.Request request) { return execute(UpdateViewAction.INSTANCE, request); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexClusterStateBuilderTests.java b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexClusterStateBuilderTests.java new file mode 100644 index 0000000000000..9e0ce2a339a66 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexClusterStateBuilderTests.java @@ -0,0 +1,169 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlock; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_SEARCH_ONLY_BLOCK; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_SEARCH_ONLY_BLOCK_ID; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; +import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; + +public class ScaleIndexClusterStateBuilderTests extends OpenSearchTestCase { + + private ScaleIndexClusterStateBuilder builder; + private ClusterState initialState; + private String testIndex; + private IndexMetadata indexMetadata; + + @Override + public void setUp() throws Exception { + super.setUp(); + builder = new ScaleIndexClusterStateBuilder(); + testIndex = "test_index"; + + // Create basic index metadata with segment replication enabled + Settings indexSettings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_INDEX_UUID, randomAlphaOfLength(8)) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) // Add search replicas + .put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) // Enable segment replication + .put(SETTING_REMOTE_STORE_ENABLED, true) // Enable remote store + .put(SETTING_CREATION_DATE, System.currentTimeMillis()) + .build(); + + indexMetadata = IndexMetadata.builder(testIndex).settings(indexSettings).build(); + + // Create initial cluster state with routing table + Metadata metadata = Metadata.builder().put(indexMetadata, true).build(); + + initialState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(metadata) + .routingTable(RoutingTable.builder().addAsNew(indexMetadata).build()) + .build(); + } + + public void testBuildScaleDownState() { + Map blockedIndices = new HashMap<>(); + + // Execute scale down state build + ClusterState newState = builder.buildScaleDownState(initialState, testIndex, blockedIndices); + + // Verify block was added + 
assertTrue("Scale down block should be present", newState.blocks().hasIndexBlockWithId(testIndex, INDEX_SEARCH_ONLY_BLOCK_ID)); + + // Verify blocked indices map was updated + assertFalse("Blocked indices map should not be empty", blockedIndices.isEmpty()); + assertEquals("Should have one blocked index", 1, blockedIndices.size()); + assertTrue("Index should be in blocked indices map", blockedIndices.containsKey(indexMetadata.getIndex())); + } + + public void testBuildFinalScaleDownState() { + Map blockedIndices = new HashMap<>(); + ClusterState stateWithBlock = builder.buildScaleDownState(initialState, testIndex, blockedIndices); + + ClusterState finalState = builder.buildFinalScaleDownState(stateWithBlock, testIndex); + + // Verify blocks + assertFalse( + "Temporary block should be removed", + finalState.blocks().hasIndexBlock(testIndex, blockedIndices.get(indexMetadata.getIndex())) + ); + assertTrue("Search-only block should be present", finalState.blocks().hasIndexBlock(testIndex, INDEX_SEARCH_ONLY_BLOCK)); + + // Verify metadata was updated + IndexMetadata updatedMetadata = finalState.metadata().index(testIndex); + assertTrue( + "Index should be marked as search-only", + updatedMetadata.getSettings().getAsBoolean(INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) + ); + } + + public void testBuildScaleUpRoutingTable() { + // Prepare a proper search-only state + Settings scaleUpSettings = Settings.builder() + .put(indexMetadata.getSettings()) + .put(INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), true) + .build(); + + IndexMetadata searchOnlyMetadata = IndexMetadata.builder(indexMetadata).settings(scaleUpSettings).build(); + + // Create search-only shard routing + ShardRouting searchOnlyShard = ShardRouting.newUnassigned( + new ShardId(searchOnlyMetadata.getIndex(), 0), + false, // not primary + true, // search only + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") + ); + + // Build routing table with search-only shard + IndexRoutingTable.Builder routingTableBuilder = new IndexRoutingTable.Builder(searchOnlyMetadata.getIndex()).addShard( + searchOnlyShard + ); + + ClusterState searchOnlyState = ClusterState.builder(initialState) + .metadata(Metadata.builder(initialState.metadata()).put(searchOnlyMetadata, true)) + .routingTable(RoutingTable.builder().add(routingTableBuilder.build()).build()) + .build(); + + // Execute scale up + RoutingTable newRoutingTable = builder.buildScaleUpRoutingTable(searchOnlyState, testIndex); + + // Verify routing table + IndexRoutingTable indexRoutingTable = newRoutingTable.index(testIndex); + assertNotNull("Index routing table should exist", indexRoutingTable); + + // Verify primary shard was added + boolean hasPrimary = indexRoutingTable.shardsWithState(UNASSIGNED).stream().anyMatch(ShardRouting::primary); + assertTrue("Should have an unassigned primary shard", hasPrimary); + + // Verify regular replicas were added (excluding search replicas) + long replicaCount = indexRoutingTable.shardsWithState(UNASSIGNED) + .stream() + .filter(shard -> !shard.primary() && !shard.isSearchOnly()) + .count(); + assertEquals("Should have correct number of replica shards", indexMetadata.getNumberOfReplicas(), replicaCount); + + // Verify search replicas were preserved + long searchReplicaCount = indexRoutingTable.shardsWithState(UNASSIGNED).stream().filter(ShardRouting::isSearchOnly).count(); + assertEquals("Should preserve search replicas", indexMetadata.getNumberOfSearchOnlyReplicas(), searchReplicaCount); + } 
+ + public void testBuildFinalScaleDownStateWithInvalidIndex() { + expectThrows(IllegalStateException.class, () -> builder.buildFinalScaleDownState(initialState, "nonexistent_index")); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeRequestTests.java new file mode 100644 index 0000000000000..f2974c97f1e24 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexNodeRequestTests.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class ScaleIndexNodeRequestTests extends OpenSearchTestCase { + + public void testConstructorAndGetters() { + String indexName = "test_index"; + List shardIds = createTestShardIds(indexName, 3); + + ScaleIndexNodeRequest request = new ScaleIndexNodeRequest(indexName, shardIds); + + assertEquals("Index name should match", indexName, request.getIndex()); + assertEquals("Shard IDs should match", shardIds, request.getShardIds()); + } + + public void testSerializationRoundTrip() throws IOException { + String indexName = "test_index"; + List shardIds = createTestShardIds(indexName, 3); + + ScaleIndexNodeRequest originalRequest = new ScaleIndexNodeRequest(indexName, shardIds); + + BytesStreamOutput output = new BytesStreamOutput(); + originalRequest.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + ScaleIndexNodeRequest deserializedRequest = new ScaleIndexNodeRequest(input); + + assertEquals("Index name should survive serialization", originalRequest.getIndex(), deserializedRequest.getIndex()); + assertEquals("Shard IDs should survive serialization", originalRequest.getShardIds(), deserializedRequest.getShardIds()); + } + + public void testSerializationWithEmptyShardList() throws IOException { + String indexName = "test_index"; + List emptyShardIds = new ArrayList<>(); + + ScaleIndexNodeRequest originalRequest = new ScaleIndexNodeRequest(indexName, emptyShardIds); + + BytesStreamOutput output = new BytesStreamOutput(); + originalRequest.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + ScaleIndexNodeRequest deserializedRequest = new ScaleIndexNodeRequest(input); + + assertEquals("Index name should survive serialization", originalRequest.getIndex(), deserializedRequest.getIndex()); + assertTrue("Empty shard list should survive serialization", deserializedRequest.getShardIds().isEmpty()); + } + + public void testSerializationWithMultipleShards() throws IOException { + String indexName = "test_index"; + List shardIds = createTestShardIds(indexName, 5); + + ScaleIndexNodeRequest originalRequest = new ScaleIndexNodeRequest(indexName, shardIds); + + BytesStreamOutput output = new BytesStreamOutput(); + originalRequest.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + ScaleIndexNodeRequest deserializedRequest = new 
ScaleIndexNodeRequest(input); + + assertEquals( + "Should have correct number of shards after deserialization", + shardIds.size(), + deserializedRequest.getShardIds().size() + ); + + for (int i = 0; i < shardIds.size(); i++) { + ShardId original = shardIds.get(i); + ShardId deserialized = deserializedRequest.getShardIds().get(i); + + assertEquals("Shard ID should match", original.id(), deserialized.id()); + assertEquals("Index name should match", original.getIndexName(), deserialized.getIndexName()); + } + } + + private List createTestShardIds(String indexName, int count) { + List shardIds = new ArrayList<>(count); + Index index = new Index(indexName, "uuid"); + for (int i = 0; i < count; i++) { + shardIds.add(new ShardId(index, i)); + } + return shardIds; + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexOperationValidatorTests.java b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexOperationValidatorTests.java new file mode 100644 index 0000000000000..71976f17499df --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexOperationValidatorTests.java @@ -0,0 +1,159 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.Version; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.test.OpenSearchTestCase; + +import org.mockito.ArgumentMatcher; +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +public class ScaleIndexOperationValidatorTests extends OpenSearchTestCase { + + private ScaleIndexOperationValidator validator; + private ActionListener listener; + + @Override + public void setUp() throws Exception { + super.setUp(); + validator = new ScaleIndexOperationValidator(); + // Create a mock listener so we can verify onFailure is called with the expected exception. + listener = Mockito.mock(ActionListener.class); + } + + public void testValidateScalePrerequisites_NullIndexMetadata() { + // When index metadata is null, validation should fail. + boolean result = validator.validateScalePrerequisites(null, "test-index", listener, true); + assertFalse(result); + verify(listener).onFailure(argThat(new ExceptionMatcher("Index [test-index] not found"))); + } + + public void testValidateScalePrerequisites_ScaleDown_AlreadySearchOnly() { + // For scale-down, if the index is already marked as search-only, validation should fail. 
+ Settings settings = Settings.builder().put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), true).build(); + IndexMetadata indexMetadata = createTestIndexMetadata("test-index", settings, 1); + boolean result = validator.validateScalePrerequisites(indexMetadata, "test-index", listener, true); + assertFalse(result); + verify(listener).onFailure(argThat(new ExceptionMatcher("already in search-only mode"))); + } + + public void testValidateScalePrerequisites_ScaleDown_NoSearchOnlyReplicas() { + // If there are zero search-only replicas, validation should fail. + Settings settings = Settings.builder() + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") + .build(); + // Pass zero for the number of search-only replicas. + IndexMetadata indexMetadata = createTestIndexMetadata("test-index", settings, 0); + boolean result = validator.validateScalePrerequisites(indexMetadata, "test-index", listener, true); + assertFalse(result); + verify(listener).onFailure(argThat(new ExceptionMatcher("Cannot scale to zero without search replicas"))); + } + + public void testValidateScalePrerequisites_ScaleDown_RemoteStoreNotEnabled() { + // If remote store is not enabled, validation should fail. + Settings settings = Settings.builder() + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") + .build(); + IndexMetadata indexMetadata = createTestIndexMetadata("test-index", settings, 1); + boolean result = validator.validateScalePrerequisites(indexMetadata, "test-index", listener, true); + assertFalse(result); + verify(listener).onFailure(argThat(new ExceptionMatcher(IndexMetadata.SETTING_REMOTE_STORE_ENABLED))); + } + + public void testValidateScalePrerequisites_ScaleDown_InvalidReplicationType() { + // If the replication type is not SEGMENT, validation should fail. + Settings settings = Settings.builder() + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, "OTHER") + .build(); + IndexMetadata indexMetadata = createTestIndexMetadata("test-index", settings, 1); + boolean result = validator.validateScalePrerequisites(indexMetadata, "test-index", listener, true); + assertFalse(result); + verify(listener).onFailure(argThat(new ExceptionMatcher("segment replication must be enabled"))); + } + + public void testValidateScalePrerequisites_ScaleDown_Valid() { + // All prerequisites for scaling down are met. + Settings settings = Settings.builder() + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") + .build(); + IndexMetadata indexMetadata = createTestIndexMetadata("test-index", settings, 1); + boolean result = validator.validateScalePrerequisites(indexMetadata, "test-index", listener, true); + assertTrue(result); + verify(listener, never()).onFailure(any()); + } + + public void testValidateScalePrerequisites_ScaleUp_NotSearchOnly() { + // For scale-up, the index must be in search-only mode. 
+ Settings settings = Settings.builder().put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false).build(); + IndexMetadata indexMetadata = createTestIndexMetadata("test-index", settings, 1); + boolean result = validator.validateScalePrerequisites(indexMetadata, "test-index", listener, false); + assertFalse(result); + verify(listener).onFailure(argThat(new ExceptionMatcher("not in search-only mode"))); + } + + public void testValidateScalePrerequisites_ScaleUp_Valid() { + // Valid scale-up: the index is in search-only mode. + Settings settings = Settings.builder().put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), true).build(); + IndexMetadata indexMetadata = createTestIndexMetadata("test-index", settings, 1); + boolean result = validator.validateScalePrerequisites(indexMetadata, "test-index", listener, false); + assertTrue(result); + verify(listener, never()).onFailure(any()); + } + + /** + * Helper method to create a dummy IndexMetadata. + * Adjust this helper to match your actual IndexMetadata builder. + */ + private IndexMetadata createTestIndexMetadata(String indexName, Settings settings, int searchOnlyReplicas) { + Settings updatedSettings = Settings.builder() + .put(settings) + // Add the required index version setting. You can use a hardcoded value or Version.CURRENT.toString() + .put("index.version.created", Version.CURRENT) + .build(); + return IndexMetadata.builder(indexName) + .settings(updatedSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .numberOfSearchReplicas(searchOnlyReplicas) + .build(); + } + + /** + * A custom ArgumentMatcher to check that an exception’s message contains a given substring. + */ + private static class ExceptionMatcher implements ArgumentMatcher { + private final String substring; + + ExceptionMatcher(String substring) { + this.substring = substring; + } + + @Override + public boolean matches(Exception e) { + return e != null && e.getMessage() != null && e.getMessage().contains(substring); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequestTests.java new file mode 100644 index 0000000000000..c34de67290edf --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexRequestTests.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class ScaleIndexRequestTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + ScaleIndexRequest request = new ScaleIndexRequest("test_index", true); + + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + + StreamInput in = out.bytes().streamInput(); + ScaleIndexRequest deserializedRequest = new ScaleIndexRequest(in); + + assertEquals(request.getIndex(), deserializedRequest.getIndex()); + assertEquals(request.isScaleDown(), deserializedRequest.isScaleDown()); + } + + public void testValidation() { + ScaleIndexRequest request = new ScaleIndexRequest(null, true); + assertNotNull(request.validate()); + + request = new ScaleIndexRequest("", true); + assertNotNull(request.validate()); + + request = new ScaleIndexRequest(" ", true); + assertNotNull(request.validate()); + + request = new ScaleIndexRequest("test_index", true); + assertNull(request.validate()); + } + + public void testEquals() { + ScaleIndexRequest request1 = new ScaleIndexRequest("test_index", true); + ScaleIndexRequest request2 = new ScaleIndexRequest("test_index", true); + ScaleIndexRequest request3 = new ScaleIndexRequest("other_index", true); + ScaleIndexRequest request4 = new ScaleIndexRequest("test_index", false); + + assertEquals(request1, request2); + assertNotEquals(request1, request3); + assertNotEquals(request1, request4); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexResponseTests.java new file mode 100644 index 0000000000000..ba377e5e887be --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexResponseTests.java @@ -0,0 +1,234 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +public class ScaleIndexResponseTests extends OpenSearchTestCase { + + private DiscoveryNode createTestNode() throws Exception { + return new DiscoveryNode( + "test_node", + "test_node_id", + new TransportAddress(InetAddress.getByName("127.0.0.1"), 9300), + Collections.emptyMap(), + Set.of(DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + } + + public void testSuccessfulResponse() throws Exception { + // Create test node and responses with no failures + DiscoveryNode node = createTestNode(); + List shardResponses = new ArrayList<>(); + + // Add successful shard responses + shardResponses.add( + new ScaleIndexShardResponse( + new ShardId(new Index("test_index", "test_uuid"), 0), + false, // doesn't need sync + 0 // no uncommitted operations + ) + ); + + List nodeResponses = Collections.singletonList(new ScaleIndexNodeResponse(node, shardResponses)); + + ScaleIndexResponse response = new ScaleIndexResponse(nodeResponses); + + // Verify response state + assertFalse("Response should not have failures", response.hasFailures()); + assertNull("Failure reason should be null", response.buildFailureReason()); + } + + public void testResponseWithFailures() throws Exception { + DiscoveryNode node = createTestNode(); + List shardResponses = new ArrayList<>(); + + // Create an Index instance + Index index = new Index("test_index", "test_uuid"); + + // Add a failed shard response (needs sync) + ShardId shardId0 = new ShardId(index, 0); + shardResponses.add( + new ScaleIndexShardResponse( + shardId0, + true, // needs sync + 0 // no uncommitted operations + ) + ); + + // Add another failed shard response (has uncommitted operations) + ShardId shardId1 = new ShardId(index, 1); + shardResponses.add( + new ScaleIndexShardResponse( + shardId1, + false, // doesn't need sync + 5 // has uncommitted operations + ) + ); + + List nodeResponses = Collections.singletonList(new ScaleIndexNodeResponse(node, shardResponses)); + + ScaleIndexResponse response = new ScaleIndexResponse(nodeResponses); + + // Verify response state + assertTrue("Response should have failures", response.hasFailures()); + assertNotNull("Failure reason should not be null", response.buildFailureReason()); + String failureReason = response.buildFailureReason(); + + // Verify the exact shard IDs appear in the failure reason + assertTrue("Failure reason should mention shard 0", failureReason.contains("Shard " + shardId0)); + assertTrue("Failure reason should mention shard 1", failureReason.contains("Shard " + shardId1)); + assertTrue("Failure reason should mention sync needed", failureReason.contains("needs sync")); + assertTrue("Failure reason should mention uncommitted operations", failureReason.contains("has uncommitted operations")); + } + + public void testSerialization() 
throws Exception { + DiscoveryNode node = createTestNode(); + List shardResponses = new ArrayList<>(); + + // Add mixed success/failure responses + shardResponses.add( + new ScaleIndexShardResponse( + new ShardId(new Index("test_index", "test_uuid"), 0), + false, // doesn't need sync + 0 // no uncommitted operations + ) + ); + shardResponses.add( + new ScaleIndexShardResponse( + new ShardId(new Index("test_index", "test_uuid"), 1), + true, // needs sync + 3 // has uncommitted operations + ) + ); + + List nodeResponses = Collections.singletonList(new ScaleIndexNodeResponse(node, shardResponses)); + + ScaleIndexResponse originalResponse = new ScaleIndexResponse(nodeResponses); + + // Serialize + BytesStreamOutput output = new BytesStreamOutput(); + originalResponse.writeTo(output); + + // Deserialize - first read the node responses + StreamInput input = output.bytes().streamInput(); + List deserializedNodeResponses = input.readList(ScaleIndexNodeResponse::new); + ScaleIndexResponse deserializedResponse = new ScaleIndexResponse(deserializedNodeResponses); + + // Verify serialization preserved state + assertEquals("Failure state should match after serialization", originalResponse.hasFailures(), deserializedResponse.hasFailures()); + assertEquals( + "Failure reason should match after serialization", + originalResponse.buildFailureReason(), + deserializedResponse.buildFailureReason() + ); + } + + public void testToXContent() throws Exception { + DiscoveryNode node = createTestNode(); + List shardResponses = new ArrayList<>(); + + // Add a failed shard response + shardResponses.add( + new ScaleIndexShardResponse( + new ShardId(new Index("test_index", "test_uuid"), 0), + true, // needs sync + 2 // has uncommitted operations + ) + ); + + List nodeResponses = Collections.singletonList(new ScaleIndexNodeResponse(node, shardResponses)); + + ScaleIndexResponse response = new ScaleIndexResponse(nodeResponses); + + // Convert to XContent + XContentBuilder builder = XContentFactory.jsonBuilder(); + response.toXContent(builder, null); + String json = builder.toString(); + + // Verify XContent output contains only the fields defined in toXContent() + assertTrue("XContent should contain failure_reason field", json.contains("\"failure_reason\"")); + // The failure reason will contain details about the failure + assertTrue("XContent should contain failure details", json.contains("Shard") && json.contains("needs sync")); + } + + public void testEmptyResponse() throws Exception { + // Create response with empty node responses + ScaleIndexResponse response = new ScaleIndexResponse(Collections.emptyList()); + + // Verify empty response state + assertFalse("Empty response should not have failures", response.hasFailures()); + assertNull("Empty response should have null failure reason", response.buildFailureReason()); + } + + public void testMultiNodeResponse() throws Exception { + List nodeResponses = new ArrayList<>(); + + // Create two nodes + DiscoveryNode node1 = createTestNode(); + DiscoveryNode node2 = new DiscoveryNode( + "test_node2", + "test_node_id2", + new TransportAddress(InetAddress.getByName("127.0.0.2"), 9300), + Collections.emptyMap(), + Set.of(DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + + // Create index and shards + Index index = new Index("test_index", "test_uuid"); + ShardId shardId0 = new ShardId(index, 0); + ShardId shardId1 = new ShardId(index, 1); + + // Add responses from both nodes + List shardResponses1 = Collections.singletonList( + new ScaleIndexShardResponse( + shardId0, + 
false, // doesn't need sync + 0 // no uncommitted operations + ) + ); + + List shardResponses2 = Collections.singletonList( + new ScaleIndexShardResponse( + shardId1, + true, // needs sync + 0 // no uncommitted operations + ) + ); + + nodeResponses.add(new ScaleIndexNodeResponse(node1, shardResponses1)); + nodeResponses.add(new ScaleIndexNodeResponse(node2, shardResponses2)); + + ScaleIndexResponse response = new ScaleIndexResponse(nodeResponses); + + // Verify multi-node response + assertTrue("Response should have failures due to node2", response.hasFailures()); + String failureReason = response.buildFailureReason(); + assertTrue("Failure reason should mention node2", failureReason.contains("test_node2")); + assertTrue("Failure reason should mention shard 1", failureReason.contains("Shard " + shardId1)); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardResponseTests.java new file mode 100644 index 0000000000000..6ec24514cfcf5 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardResponseTests.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class ScaleIndexShardResponseTests extends OpenSearchTestCase { + + public void testConstructorAndGetters() { + ShardId shardId = createTestShardId(); + boolean needsSync = randomBoolean(); + int uncommittedOps = randomIntBetween(0, 100); + + ScaleIndexShardResponse response = new ScaleIndexShardResponse(shardId, needsSync, uncommittedOps); + + assertEquals("Shard ID should match", shardId, response.getShardId()); + assertEquals("Needs sync flag should match", needsSync, response.needsSync()); + assertEquals("Uncommitted operations status should match", uncommittedOps > 0, response.hasUncommittedOperations()); + } + + public void testSerializationRoundTrip() throws IOException { + ShardId shardId = createTestShardId(); + boolean needsSync = randomBoolean(); + int uncommittedOps = randomIntBetween(0, 100); + + ScaleIndexShardResponse originalResponse = new ScaleIndexShardResponse(shardId, needsSync, uncommittedOps); + + BytesStreamOutput output = new BytesStreamOutput(); + originalResponse.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + ScaleIndexShardResponse deserializedResponse = new ScaleIndexShardResponse(input); + + assertEquals("Shard ID should survive serialization", originalResponse.getShardId(), deserializedResponse.getShardId()); + assertEquals("Needs sync flag should survive serialization", originalResponse.needsSync(), deserializedResponse.needsSync()); + assertEquals( + "Uncommitted operations status should survive serialization", + originalResponse.hasUncommittedOperations(), + deserializedResponse.hasUncommittedOperations() + ); + } + + public void testZeroUncommittedOperations() { + ShardId shardId = createTestShardId(); + ScaleIndexShardResponse 
response = new ScaleIndexShardResponse(shardId, randomBoolean(), 0); + + assertFalse("Should report no uncommitted operations when count is 0", response.hasUncommittedOperations()); + } + + public void testNonZeroUncommittedOperations() { + ShardId shardId = createTestShardId(); + int uncommittedOps = randomIntBetween(1, 100); + ScaleIndexShardResponse response = new ScaleIndexShardResponse(shardId, randomBoolean(), uncommittedOps); + + assertTrue("Should report uncommitted operations when count is > 0", response.hasUncommittedOperations()); + } + + public void testSerializationWithExtremeValues() throws IOException { + ShardId shardId = createTestShardId(); + + // Test with Integer.MAX_VALUE uncommitted operations + ScaleIndexShardResponse originalResponse = new ScaleIndexShardResponse(shardId, true, Integer.MAX_VALUE); + + BytesStreamOutput output = new BytesStreamOutput(); + originalResponse.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + ScaleIndexShardResponse deserializedResponse = new ScaleIndexShardResponse(input); + + assertTrue("Max value should be preserved and indicate uncommitted operations", deserializedResponse.hasUncommittedOperations()); + } + + public void testSerializationWithVariousShardIds() throws IOException { + // Test with different shard numbers + for (int shardNum : new int[] { 0, 1, 100, Integer.MAX_VALUE }) { + ShardId shardId = new ShardId(new Index("test_index", "uuid"), shardNum); + ScaleIndexShardResponse originalResponse = new ScaleIndexShardResponse(shardId, randomBoolean(), randomIntBetween(0, 100)); + + BytesStreamOutput output = new BytesStreamOutput(); + originalResponse.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + ScaleIndexShardResponse deserializedResponse = new ScaleIndexShardResponse(input); + + assertEquals("Shard number should survive serialization", shardId.id(), deserializedResponse.getShardId().id()); + } + } + + private ShardId createTestShardId() { + return new ShardId(new Index("test_index", "uuid"), randomIntBetween(0, 10)); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardSyncManagerTests.java b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardSyncManagerTests.java new file mode 100644 index 0000000000000..9f40d24fbb992 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexShardSyncManagerTests.java @@ -0,0 +1,340 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ScaleIndexShardSyncManagerTests extends OpenSearchTestCase { + + private ClusterService clusterService; + private TransportService transportService; + private ScaleIndexShardSyncManager syncManager; + private final String transportActionName = "dummyAction"; + + @Override + public void setUp() throws Exception { + super.setUp(); + clusterService = mock(ClusterService.class); + transportService = mock(TransportService.class); + syncManager = new ScaleIndexShardSyncManager(clusterService, transportService, transportActionName); + } + + public void testSendShardSyncRequests_emptyPrimaryShards() { + ActionListener> listener = new ActionListener<>() { + @Override + public void onResponse(Collection responses) { + fail("Expected failure when primary shards map is empty"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof IllegalStateException); + assertEquals("No primary shards found for index test_index", e.getMessage()); + } + }; + syncManager.sendShardSyncRequests("test_index", Collections.emptyMap(), listener); + } + + public void testSendShardSyncRequests_nodeNotFound() { + // Prepare a mapping: one shard assigned to node "node1" + ShardId shardId = new ShardId(new Index("test_index", "uuid"), 0); + Map primaryShardsNodes = Collections.singletonMap(shardId, "node1"); + + // Set cluster state with empty discovery nodes so "node1" is missing. 
+ DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + when(clusterService.state()).thenReturn(clusterState); + + ActionListener> listener = new ActionListener<>() { + @Override + public void onResponse(Collection responses) { + fail("Expected failure due to missing node"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e.getMessage().contains("Node [node1] not found")); + } + }; + + syncManager.sendShardSyncRequests("test_index", primaryShardsNodes, listener); + } + + public void testSendShardSyncRequests_success() throws Exception { + // Prepare a mapping: one shard assigned to node "node1" + ShardId shardId = new ShardId(new Index("test_index", "uuid"), 0); + Map primaryShardsNodes = Collections.singletonMap(shardId, "node1"); + + // Build cluster state with discovery node "node1" + DiscoveryNode node = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(node).localNodeId("node1").build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + when(clusterService.state()).thenReturn(clusterState); + + // Stub transportService.sendRequest to return a dummy response. + doAnswer(invocation -> { + TransportResponseHandler handler = invocation.getArgument(3); + handler.handleResponse(new ScaleIndexNodeResponse(node, Collections.emptyList())); + return null; + }).when(transportService) + .sendRequest( + any(DiscoveryNode.class), + eq(transportActionName), + any(ScaleIndexNodeRequest.class), + any(TransportResponseHandler.class) + ); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference> responseRef = new AtomicReference<>(); + AtomicReference exceptionRef = new AtomicReference<>(); + + ActionListener> listener = new ActionListener<>() { + @Override + public void onResponse(Collection responses) { + responseRef.set(responses); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }; + + syncManager.sendShardSyncRequests("test_index", primaryShardsNodes, listener); + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertNull(exceptionRef.get()); + Collection responses = responseRef.get(); + assertNotNull(responses); + // We expect one response since there's one node. 
+ assertEquals(1, responses.size()); + } + + public void testSendNodeRequest_success() throws Exception { + DiscoveryNode node = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT); + String index = "test_index"; + List shards = Collections.singletonList(new ShardId(new Index("test_index", "uuid"), 0)); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference responseRef = new AtomicReference<>(); + + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(ScaleIndexNodeResponse response) { + responseRef.set(response); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("Unexpected failure: " + e.getMessage()); + } + }; + + doAnswer(invocation -> { + TransportResponseHandler handler = invocation.getArgument(3); + handler.handleResponse(new ScaleIndexNodeResponse(node, Collections.emptyList())); + return null; + }).when(transportService) + .sendRequest(eq(node), eq(transportActionName), any(ScaleIndexNodeRequest.class), any(TransportResponseHandler.class)); + + syncManager.sendNodeRequest(node, index, shards, listener); + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertNotNull(responseRef.get()); + } + + public void testSendNodeRequest_failure() throws Exception { + DiscoveryNode node = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT); + String index = "test_index"; + List shards = Collections.singletonList(new ShardId(new Index("test_index", "uuid"), 0)); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference exceptionRef = new AtomicReference<>(); + + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(ScaleIndexNodeResponse response) { + fail("Expected failure"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }; + + // Use a dummy Throwable as the cause instead of passing the node. + doAnswer(invocation -> { + TransportResponseHandler handler = invocation.getArgument(3); + handler.handleException(new TransportException("Test exception", new Exception("dummy cause"))); + return null; + }).when(transportService) + .sendRequest(eq(node), eq(transportActionName), any(ScaleIndexNodeRequest.class), any(TransportResponseHandler.class)); + + syncManager.sendNodeRequest(node, index, shards, listener); + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertNotNull(exceptionRef.get()); + assertTrue(exceptionRef.get() instanceof TransportException); + } + + public void testValidateNodeResponses_success() { + // Create a shard response with no failures. 
+ ShardId shardId = new ShardId(new Index("test_index", "uuid"), 0); + ScaleIndexShardResponse shardResponse = new ScaleIndexShardResponse(shardId, false, 0); + ScaleIndexNodeResponse nodeResponse = new ScaleIndexNodeResponse( + new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT), + Collections.singletonList(shardResponse) + ); + + List responses = Collections.singletonList(nodeResponse); + AtomicReference responseRef = new AtomicReference<>(); + AtomicReference exceptionRef = new AtomicReference<>(); + + syncManager.validateNodeResponses(responses, new ActionListener() { + @Override + public void onResponse(ScaleIndexResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + } + }); + + assertNull(exceptionRef.get()); + assertNotNull(responseRef.get()); + } + + public void testValidateNodeResponses_failure_uncommitted() { + // Create a shard response indicating uncommitted operations. + ShardId shardId = new ShardId(new Index("test_index", "uuid"), 0); + ScaleIndexShardResponse shardResponse = new ScaleIndexShardResponse(shardId, false, 5); + ScaleIndexNodeResponse nodeResponse = new ScaleIndexNodeResponse( + new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT), + Collections.singletonList(shardResponse) + ); + + List responses = Collections.singletonList(nodeResponse); + AtomicReference exceptionRef = new AtomicReference<>(); + + syncManager.validateNodeResponses(responses, new ActionListener() { + @Override + public void onResponse(ScaleIndexResponse response) { + fail("Expected failure due to uncommitted operations"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + } + }); + + assertNotNull(exceptionRef.get()); + assertTrue(exceptionRef.get().getMessage().contains("uncommitted operations")); + } + + public void testValidateNodeResponses_failure_needsSync() { + // Create a shard response indicating that a shard needs sync. + ShardId shardId = new ShardId(new Index("test_index", "uuid"), 0); + ScaleIndexShardResponse shardResponse = new ScaleIndexShardResponse(shardId, true, 0); + ScaleIndexNodeResponse nodeResponse = new ScaleIndexNodeResponse( + new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT), + Collections.singletonList(shardResponse) + ); + + List responses = Collections.singletonList(nodeResponse); + AtomicReference exceptionRef = new AtomicReference<>(); + + syncManager.validateNodeResponses(responses, new ActionListener() { + @Override + public void onResponse(ScaleIndexResponse response) { + fail("Expected failure due to sync needed"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + } + }); + + assertNotNull(exceptionRef.get()); + assertTrue(exceptionRef.get().getMessage().contains("sync needed")); + } + + public void testGetPrimaryShardAssignments_withRouting() { + // Create index settings with an explicit uuid. + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put("index.uuid", "uuid") + .build(); + // Build IndexMetadata using the index name. The builder will pick up the uuid from the settings. + IndexMetadata indexMetadata = IndexMetadata.builder("test_index") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + // Build a minimal routing table using the same index name and uuid. 
+ Index index = new Index("test_index", "uuid"); + ShardId shardId = new ShardId(index, 0); + ShardRouting primaryShardRouting = TestShardRouting.newShardRouting(shardId, "node1", true, ShardRoutingState.STARTED); + IndexShardRoutingTable shardRoutingTable = new IndexShardRoutingTable.Builder(shardId).addShard(primaryShardRouting).build(); + IndexRoutingTable indexRoutingTable = new IndexRoutingTable.Builder(index).addIndexShard(shardRoutingTable).build(); + RoutingTable routingTable = RoutingTable.builder().add(indexRoutingTable).build(); + + // Build a cluster state that contains the routing table. + ClusterState state = ClusterState.builder(new ClusterName("test")).routingTable(routingTable).build(); + + // Invoke the method under test. + Map assignments = syncManager.getPrimaryShardAssignments(indexMetadata, state); + // We expect one mapping: shard0 -> "node1" + assertEquals(1, assignments.size()); + // Construct the expected shard id using the same Index (name and uuid). + ShardId expectedShardId = new ShardId(new Index("test_index", "uuid"), 0); + assertEquals("node1", assignments.get(expectedShardId)); + } + +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/TransportScaleIndexActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/TransportScaleIndexActionTests.java new file mode 100644 index 0000000000000..c5a0e810db3b5 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/scale/searchonly/TransportScaleIndexActionTests.java @@ -0,0 +1,798 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.scale.searchonly; + +import org.opensearch.Version; +import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.block.ClusterBlock; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.translog.TranslogStats; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; + +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import org.mockito.ArgumentCaptor; + +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_SEARCH_ONLY_BLOCK_ID; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class TransportScaleIndexActionTests extends OpenSearchTestCase { + + private TransportService transportService; + private ClusterService clusterService; + private AllocationService allocationService; + private IndicesService 
indicesService; + private ThreadPool threadPool; + private TransportScaleIndexAction action; + + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("ScaleIndexActionTests"); + transportService = mock(TransportService.class); + clusterService = mock(ClusterService.class); + allocationService = mock(AllocationService.class); + indicesService = mock(IndicesService.class); + + action = new TransportScaleIndexAction( + transportService, + clusterService, + threadPool, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + allocationService, + indicesService + ); + + // Setup basic cluster state + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName("test")); + stateBuilder.nodes(DiscoveryNodes.builder().build()); + when(clusterService.state()).thenReturn(stateBuilder.build()); + } + + @After + public void tearDown() throws Exception { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + super.tearDown(); + } + + public void testScaleDownValidation() { + String indexName = "test_index"; + ScaleIndexRequest request = new ScaleIndexRequest(indexName, true); + + // Test validation when index doesn't exist + ClusterState state = createClusterStateWithoutIndex(indexName); + when(clusterService.state()).thenReturn(state); + + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + fail("Expected validation to fail"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof IllegalArgumentException); + assertEquals("Index [" + indexName + "] not found", e.getMessage()); + } + }; + + action.clusterManagerOperation(request, state, listener); + } + + public void testScaleDownWithSearchOnlyAlreadyEnabled() { + String indexName = "test_index"; + ScaleIndexRequest request = new ScaleIndexRequest(indexName, true); + + // Create cluster state with search-only already enabled + ClusterState state = createClusterStateWithSearchOnlyEnabled(indexName); + when(clusterService.state()).thenReturn(state); + + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + fail("Expected validation to fail"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof IllegalStateException); + assertEquals("Index [" + indexName + "] is already in search-only mode", e.getMessage()); + } + }; + + action.clusterManagerOperation(request, state, listener); + } + + public void testScaleUpValidation() { + String indexName = "test_index"; + ScaleIndexRequest request = new ScaleIndexRequest(indexName, false); + + // Test validation when index is not in search-only mode + ClusterState state = createClusterStateWithoutSearchOnly(indexName); + when(clusterService.state()).thenReturn(state); + + ActionListener listener = new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + fail("Expected validation to fail"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof IllegalStateException); + assertEquals("Index [" + indexName + "] is not in search-only mode", e.getMessage()); + } + }; + + action.clusterManagerOperation(request, state, listener); + } + + private ClusterState createClusterStateWithoutIndex(String indexName) { + return ClusterState.builder(new ClusterName("test")).metadata(Metadata.builder().build()).build(); + } + + private ClusterState 
createClusterStateWithSearchOnlyEnabled(String indexName) { + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), true) + .build(); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + return ClusterState.builder(new ClusterName("test")).metadata(Metadata.builder().put(indexMetadata, true).build()).build(); + } + + private ClusterState createClusterStateWithoutSearchOnly(String indexName) { + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) + .build(); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + return ClusterState.builder(new ClusterName("test")).metadata(Metadata.builder().put(indexMetadata, true).build()).build(); + } + + public void testAddBlockClusterStateUpdateTask() { + String indexName = "test_index"; + ScaleIndexRequest request = new ScaleIndexRequest(indexName, true); + + // Create initial cluster state with necessary index metadata + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + ClusterState initialState = ClusterState.builder(new ClusterName("test")) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + when(clusterService.state()).thenReturn(initialState); + + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + assertTrue("Expected block to be added successfully", response.isAcknowledged()); + } + + @Override + public void onFailure(Exception e) { + fail("Should not fail: " + e.getMessage()); + } + }; + + action.clusterManagerOperation(request, initialState, listener); + + // Verify that the appropriate block was added + verify(clusterService).submitStateUpdateTask(eq("add-block-index-to-scale " + indexName), any()); + } + + public void testFinalizeScaleDownTaskSimple() throws Exception { + String indexName = "test_index"; + ScaleIndexRequest request = new ScaleIndexRequest(indexName, true); + + // Create minimal index metadata that meets scale-down prerequisites. + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + // Build a minimal routing table for the index. 
+ Index index = indexMetadata.getIndex(); + ShardId shardId = new ShardId(index, 0); + ShardRouting primaryShardRouting = TestShardRouting.newShardRouting(shardId, "node1", true, ShardRoutingState.STARTED); + IndexShardRoutingTable shardRoutingTable = new IndexShardRoutingTable.Builder(shardId).addShard(primaryShardRouting).build(); + IndexRoutingTable indexRoutingTable = new IndexRoutingTable.Builder(index).addIndexShard(shardRoutingTable).build(); + RoutingTable routingTable = RoutingTable.builder().add(indexRoutingTable).build(); + + // Create a DiscoveryNode and include it in the cluster state's nodes. + DiscoveryNode node = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(node).localNodeId("node1").build(); + + // Build the complete cluster state with metadata, routing table, and nodes. + ClusterState initialState = ClusterState.builder(new ClusterName("test")) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .routingTable(routingTable) + .nodes(discoveryNodes) + .build(); + when(clusterService.state()).thenReturn(initialState); + + // Stub transportService.sendRequest so that any shard sync request immediately succeeds. + doAnswer(invocation -> { + TransportResponseHandler handler = invocation.getArgument(3); + handler.handleResponse(new ScaleIndexNodeResponse(node, Collections.emptyList())); + return null; + }).when(transportService) + .sendRequest( + any(DiscoveryNode.class), + eq(TransportScaleIndexAction.NAME), + any(ScaleIndexNodeRequest.class), + any(TransportResponseHandler.class) + ); + + // Execute the scale-down operation. + action.clusterManagerOperation(request, initialState, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) {} + + @Override + public void onFailure(Exception e) { + fail("Operation should not fail: " + e.getMessage()); + } + }); + + // Capture the add-block task submitted by the action. + ArgumentCaptor captor = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + verify(clusterService).submitStateUpdateTask(eq("add-block-index-to-scale " + indexName), captor.capture()); + ClusterStateUpdateTask addBlockTask = captor.getValue(); + + // Create a new cluster state that is different from initialState. + // For example, add a dummy block to the index. + ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder().blocks(initialState.blocks()); + blocksBuilder.addIndexBlock( + indexName, + new ClusterBlock(123, "dummy", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)) + ); + ClusterState newState = ClusterState.builder(initialState).blocks(blocksBuilder).build(); + + // Simulate the add-block task callback (with a changed state) to trigger finalize. + addBlockTask.clusterStateProcessed("test-source", initialState, newState); + + // Verify that the finalize-scale-down update task was submitted. + verify(clusterService).submitStateUpdateTask(eq("finalize-scale-down"), any(ClusterStateUpdateTask.class)); + } + + public void testScaleUpClusterStateUpdateTask() throws Exception { + String indexName = "test_index"; + ScaleIndexRequest request = new ScaleIndexRequest(indexName, false); + + // Create index metadata with search-only mode enabled. 
+ Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), true) + .build(); + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + // Build a minimal routing table for the index. + Index index = indexMetadata.getIndex(); + ShardId shardId = new ShardId(index, 0); + // Create a dummy shard routing in STARTED state. + ShardRouting searchOnlyShardRouting = TestShardRouting.newShardRouting(shardId, "node1", true, ShardRoutingState.STARTED); + IndexShardRoutingTable shardRoutingTable = new IndexShardRoutingTable.Builder(shardId).addShard(searchOnlyShardRouting).build(); + IndexRoutingTable indexRoutingTable = new IndexRoutingTable.Builder(index).addIndexShard(shardRoutingTable).build(); + RoutingTable routingTable = RoutingTable.builder().add(indexRoutingTable).build(); + + // Build the complete cluster state with metadata and the routing table. + ClusterState initialState = ClusterState.builder(new ClusterName("test")) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .routingTable(routingTable) + .build(); + + // Stub allocationService.reroute to return a valid state. + ClusterState stateAfterReroute = ClusterState.builder(initialState).build(); + when(allocationService.reroute(any(ClusterState.class), anyString())).thenReturn(stateAfterReroute); + when(clusterService.state()).thenReturn(initialState); + + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + assertTrue("Expected scale up to complete successfully", response.isAcknowledged()); + } + + @Override + public void onFailure(Exception e) { + fail("Should not fail: " + e.getMessage()); + } + }; + + // Trigger the scale-up operation. + action.clusterManagerOperation(request, initialState, listener); + + // Capture the update task submitted for scaling up. + ArgumentCaptor captor = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + verify(clusterService).submitStateUpdateTask(eq("scale-up-index"), captor.capture()); + ClusterStateUpdateTask scaleUpTask = captor.getValue(); + + // Manually simulate execution of the scale-up task. + ClusterState updatedState = scaleUpTask.execute(initialState); + scaleUpTask.clusterStateProcessed("test-source", initialState, updatedState); + + // Verify that allocationService.reroute was called with the expected reason. 
+ verify(allocationService).reroute(any(ClusterState.class), eq("restore indexing shards")); + } + + public void testScaleDownWithMissingIndex() { + String indexName = "non_existent_index"; + ScaleIndexRequest request = new ScaleIndexRequest(indexName, true); + + ClusterState state = ClusterState.builder(new ClusterName("test")).metadata(Metadata.builder().build()).build(); + + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + fail("Should fail for missing index"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof IllegalArgumentException); + assertEquals("Index [" + indexName + "] not found", e.getMessage()); + } + }; + + action.clusterManagerOperation(request, state, listener); + } + + public void testScaleUpWithSearchOnlyNotEnabled() { + String indexName = "test_index"; + ScaleIndexRequest request = new ScaleIndexRequest(indexName, false); + + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) + .build(); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + ClusterState state = ClusterState.builder(new ClusterName("test")) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + fail("Should fail when search-only is not enabled"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof IllegalStateException); + assertEquals("Index [" + indexName + "] is not in search-only mode", e.getMessage()); + } + }; + action.clusterManagerOperation(request, state, listener); + } + + public void testHandleShardSyncRequest() throws Exception { + // Mock dependencies + TransportService transportService = mock(TransportService.class); + ClusterService clusterService = mock(ClusterService.class); + IndicesService indicesService = mock(IndicesService.class); + TransportChannel channel = mock(TransportChannel.class); + DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); + + // Use a real ThreadPool but with a controlled executor + ThreadPool threadPool = new TestThreadPool("testHandleShardSyncRequest"); + + try { + // Create test data + String indexName = "test_index"; + Index index = new Index(indexName, "_na_"); + ShardId shardId = new ShardId(index, 0); + List shardIds = Collections.singletonList(shardId); + ScaleIndexNodeRequest request = new ScaleIndexNodeRequest(indexName, shardIds); + + // Mock cluster state + ClusterState clusterState = mock(ClusterState.class); + Metadata metadata = mock(Metadata.class); + IndexMetadata indexMetadata = mock(IndexMetadata.class); + when(clusterService.state()).thenReturn(clusterState); + when(clusterState.metadata()).thenReturn(metadata); + when(metadata.index(indexName)).thenReturn(indexMetadata); + when(indexMetadata.getIndex()).thenReturn(index); + when(clusterService.localNode()).thenReturn(localNode); + + // Mock index service and shard + IndexService indexService = mock(IndexService.class); + IndexShard indexShard = mock(IndexShard.class); + TranslogStats translogStats = mock(TranslogStats.class); + + when(indicesService.indexService(any(Index.class))).thenReturn(indexService); + 
when(indexService.getShardOrNull(anyInt())).thenReturn(indexShard); + when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.translogStats()).thenReturn(translogStats); + when(translogStats.getUncommittedOperations()).thenReturn(0); + when(indexShard.isSyncNeeded()).thenReturn(false); + + // Mock shard routing to return a primary routing + ShardRouting shardRouting = mock(ShardRouting.class); + when(shardRouting.primary()).thenReturn(true); + when(indexShard.routingEntry()).thenReturn(shardRouting); + + // Mock the acquireAllPrimaryOperationsPermits method to immediately call the listener + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(0); + Releasable releasable = mock(Releasable.class); + listener.onResponse(releasable); + return null; + }).when(indexShard).acquireAllPrimaryOperationsPermits(any(ActionListener.class), any(TimeValue.class)); + + // Create action instance with the real ThreadPool + TransportScaleIndexAction action = new TransportScaleIndexAction( + transportService, + clusterService, + threadPool, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + allocationService, + indicesService + ); + + // Call handleShardSyncRequest + action.handleShardSyncRequest(request, channel); + + // Wait a short time for the async task to execute + assertBusy(() -> { verify(channel).sendResponse(any(ScaleIndexNodeResponse.class)); }, 5, TimeUnit.SECONDS); + } finally { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + } + } + + public void testSyncSingleShard() throws Exception { + IndexShard shard = mock(IndexShard.class); + ShardId shardId = new ShardId(new Index("test_index", "_na_"), 0); + TranslogStats translogStats = mock(TranslogStats.class); + + when(shard.shardId()).thenReturn(shardId); + when(shard.translogStats()).thenReturn(translogStats); + + TransportScaleIndexAction action = new TransportScaleIndexAction( + transportService, + clusterService, + threadPool, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + allocationService, + indicesService + ); + + when(translogStats.getUncommittedOperations()).thenReturn(0); + when(shard.isSyncNeeded()).thenReturn(false); + + final AtomicReference successResponseRef = new AtomicReference<>(); + final AtomicReference successExceptionRef = new AtomicReference<>(); + final CountDownLatch successLatch = new CountDownLatch(1); + + action.syncSingleShard(shard, new ActionListener<>() { + @Override + public void onResponse(ScaleIndexShardResponse response) { + successResponseRef.set(response); + successLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + successExceptionRef.set(e); + successLatch.countDown(); + } + }); + + ArgumentCaptor successPermitCaptor = ArgumentCaptor.forClass(ActionListener.class); + verify(shard).acquireAllPrimaryOperationsPermits(successPermitCaptor.capture(), any(TimeValue.class)); + successPermitCaptor.getValue().onResponse(mock(Releasable.class)); + + assertTrue(successLatch.await(1, TimeUnit.SECONDS)); + + assertNull("No exception expected", successExceptionRef.get()); + assertNotNull("Response should not be null", successResponseRef.get()); + assertFalse("Response should not indicate sync needed", successResponseRef.get().needsSync()); + assertFalse("Response should not indicate uncommitted operations", successResponseRef.get().hasUncommittedOperations()); + + verify(shard, times(1)).sync(); + verify(shard, 
times(1)).flush(any(FlushRequest.class)); + verify(shard, times(1)).waitForRemoteStoreSync(); + + clearInvocations(shard); + + when(translogStats.getUncommittedOperations()).thenReturn(5); + when(shard.isSyncNeeded()).thenReturn(true); + + final AtomicReference responseRef = new AtomicReference<>(); + final AtomicReference exceptionRef = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + + action.syncSingleShard(shard, new ActionListener<>() { + @Override + public void onResponse(ScaleIndexShardResponse response) { + responseRef.set(response); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + ArgumentCaptor permitListenerCaptor = ArgumentCaptor.forClass(ActionListener.class); + verify(shard).acquireAllPrimaryOperationsPermits(permitListenerCaptor.capture(), any(TimeValue.class)); + permitListenerCaptor.getValue().onResponse(mock(Releasable.class)); + + assertTrue(latch.await(1, TimeUnit.SECONDS)); + + assertNull("No exception expected", exceptionRef.get()); + assertNotNull("Response should not be null", responseRef.get()); + assertTrue("Response should indicate uncommitted operations", responseRef.get().hasUncommittedOperations()); + assertTrue("Response should indicate sync needed", responseRef.get().needsSync()); + + verify(shard, times(1)).sync(); + verify(shard, times(1)).flush(any(FlushRequest.class)); + verify(shard, times(1)).waitForRemoteStoreSync(); + } + + public void testCheckBlock() { + // Mock dependencies + TransportScaleIndexAction action = new TransportScaleIndexAction( + transportService, + clusterService, + threadPool, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + allocationService, + indicesService + ); + + // Create test data + String indexName = "test_index"; + ScaleIndexRequest request = new ScaleIndexRequest(indexName, true); + + // Create index metadata + Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + // Test with no blocks + ClusterState state = ClusterState.builder(new ClusterName("test")) + .metadata(Metadata.builder().put(indexMetadata, true)) + .blocks(ClusterBlocks.builder().build()) + .build(); + assertNull(action.checkBlock(request, state)); + + // Test with metadata write block + ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder(); + ClusterBlock metadataBlock = new ClusterBlock( + 1, + "test block", + false, + false, + false, + RestStatus.FORBIDDEN, + EnumSet.of(ClusterBlockLevel.METADATA_WRITE) + ); + blocksBuilder.addGlobalBlock(metadataBlock); + state = ClusterState.builder(new ClusterName("test")) + .metadata(Metadata.builder().put(indexMetadata, true)) + .blocks(blocksBuilder.build()) + .build(); + + ClusterBlockException exception = action.checkBlock(request, state); + assertNotNull(exception); + assertTrue(exception.blocks().contains(metadataBlock)); + } + + public void testAddBlockClusterStateUpdateTaskExecute() { + // Mock dependencies + String indexName = "test_index"; + ActionListener listener = mock(ActionListener.class); + Map blockedIndices = new HashMap<>(); + + // Create initial cluster state + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + 
.put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + + ClusterState initialState = ClusterState.builder(new ClusterName("test")) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + // Create action and task + TransportScaleIndexAction action = new TransportScaleIndexAction( + transportService, + clusterService, + threadPool, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + allocationService, + indicesService + ); + + TransportScaleIndexAction.AddBlockClusterStateUpdateTask task = action.new AddBlockClusterStateUpdateTask( + indexName, blockedIndices, listener + ); + + // Test successful execution + ClusterState newState = task.execute(initialState); + assertNotEquals(initialState, newState); + + // Verify that a block with the correct ID was added + Collection indexBlocks = newState.blocks().indices().get(indexName); + assertNotNull("Index blocks should not be null", indexBlocks); + assertTrue("Index should have at least one block", !indexBlocks.isEmpty()); + boolean hasBlockWithCorrectId = indexBlocks.stream().anyMatch(block -> block.id() == INDEX_SEARCH_ONLY_BLOCK_ID); + assertTrue("Should find a block with ID " + INDEX_SEARCH_ONLY_BLOCK_ID, hasBlockWithCorrectId); + + // Test execution with missing index + initialState = ClusterState.builder(new ClusterName("test")).metadata(Metadata.builder().build()).build(); + + ClusterState resultState = task.execute(initialState); + assertEquals(initialState, resultState); + + // Test onFailure + Exception testException = new Exception("Test failure"); + task.onFailure("test", testException); + verify(listener).onFailure(testException); + } + + public void testFinalizeScaleDownTaskFailure() { + // Mock dependencies + String indexName = "test_index"; + ActionListener listener = mock(ActionListener.class); + + TransportScaleIndexAction action = new TransportScaleIndexAction( + transportService, + clusterService, + threadPool, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + allocationService, + indicesService + ); + + TransportScaleIndexAction.FinalizeScaleDownTask task = action.new FinalizeScaleDownTask(indexName, listener); + + // Test onFailure + Exception testException = new Exception("Test failure"); + task.onFailure("test", testException); + verify(listener).onFailure(testException); + + // Test successful processing + ClusterState state = mock(ClusterState.class); + task.clusterStateProcessed("test", state, state); + verify(listener).onResponse(any(AcknowledgedResponse.class)); + } + + public void testThreadPoolConstantValidity() { + ThreadPool threadPool = new TestThreadPool("testThreadPoolConstantValidity"); + try { + // Verify that our constant points to a valid thread pool + assertNotNull("Thread pool executor should exist", threadPool.executor(TransportScaleIndexAction.SHARD_SYNC_EXECUTOR)); + + // Verify SHARD_SYNC_EXECUTOR is using the MANAGEMENT pool as expected + assertEquals( + "SHARD_SYNC_EXECUTOR should be set to MANAGEMENT", + ThreadPool.Names.MANAGEMENT, + TransportScaleIndexAction.SHARD_SYNC_EXECUTOR + ); + } finally { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); 
+ } + } + +} diff --git a/server/src/test/java/org/opensearch/cluster/health/ClusterShardHealthTests.java b/server/src/test/java/org/opensearch/cluster/health/ClusterShardHealthTests.java index 01319867bcd86..0d71f449f9b4f 100644 --- a/server/src/test/java/org/opensearch/cluster/health/ClusterShardHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/health/ClusterShardHealthTests.java @@ -31,12 +31,15 @@ package org.opensearch.cluster.health; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -64,8 +67,19 @@ public void testClusterShardGreenHealth() { indexShardRoutingBuilder.addShard( TestShardRouting.newShardRouting(indexName, shardID, "node_1", null, false, ShardRoutingState.STARTED) ); + + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + .creationDate(System.currentTimeMillis()); + IndexMetadata indexMetadata = indexMetadataBuilder.build(); IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingBuilder.build(); - ClusterShardHealth clusterShardHealth = new ClusterShardHealth(shardID, indexShardRoutingTable); + + ClusterShardHealth clusterShardHealth = new ClusterShardHealth(shardID, indexShardRoutingTable, indexMetadata); assertEquals(2, clusterShardHealth.getActiveShards()); assertEquals(0, clusterShardHealth.getInitializingShards()); assertEquals(0, clusterShardHealth.getRelocatingShards()); @@ -112,7 +126,18 @@ public void testClusterShardYellowHealth() { ) ); IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingBuilder.build(); - ClusterShardHealth clusterShardHealth = new ClusterShardHealth(shardID, indexShardRoutingTable); + + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + .creationDate(System.currentTimeMillis()); + IndexMetadata indexMetadata = indexMetadataBuilder.build(); + + ClusterShardHealth clusterShardHealth = new ClusterShardHealth(shardID, indexShardRoutingTable, indexMetadata); assertEquals(2, clusterShardHealth.getActiveShards()); assertEquals(1, clusterShardHealth.getInitializingShards()); assertEquals(1, clusterShardHealth.getRelocatingShards()); @@ -150,7 +175,18 @@ public void testClusterShardRedHealth() { TestShardRouting.newShardRouting(indexName, shardID, null, null, false, ShardRoutingState.UNASSIGNED) ); IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingBuilder.build(); - ClusterShardHealth clusterShardHealth = new ClusterShardHealth(shardID, indexShardRoutingTable); + + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) + .settings( + Settings.builder() + 
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + .creationDate(System.currentTimeMillis()); + IndexMetadata indexMetadata = indexMetadataBuilder.build(); + + ClusterShardHealth clusterShardHealth = new ClusterShardHealth(shardID, indexShardRoutingTable, indexMetadata); assertEquals(0, clusterShardHealth.getActiveShards()); assertEquals(0, clusterShardHealth.getInitializingShards()); assertEquals(0, clusterShardHealth.getRelocatingShards()); @@ -161,7 +197,30 @@ public void testClusterShardRedHealth() { } public void testShardRoutingNullCheck() { - assertThrows(AssertionError.class, () -> ClusterShardHealth.getShardHealth(null, 0, 0)); + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder("test") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ); + IndexMetadata indexMetadata = indexMetadataBuilder.build(); + + // When search-only is not enabled (default), expect RED status + assertEquals(ClusterHealthStatus.RED, ClusterShardHealth.getShardHealth(null, 0, 0, indexMetadata)); + + // When search-only is enabled, expect RED status + IndexMetadata.Builder searchOnlyMetadataBuilder = IndexMetadata.builder("test") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), true) + ); + IndexMetadata searchOnlyMetadata = searchOnlyMetadataBuilder.build(); + + assertEquals(ClusterHealthStatus.RED, ClusterShardHealth.getShardHealth(null, 0, 0, searchOnlyMetadata)); } @Override diff --git a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java index 287962b158c79..88c0046796ce6 100644 --- a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.store.FilterDirectory; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.NRTReplicationEngineFactory; @@ -137,29 +138,43 @@ public void testGetSegmentFilesReturnEmptyResponse() throws ExecutionException, assert (response.files.isEmpty()); } - public void testGetCheckpointMetadataEmpty() throws ExecutionException, InterruptedException, IOException { + public void testGetCheckpointMetadataEmpty() throws ExecutionException, InterruptedException { IndexShard mockShard = mock(IndexShard.class); - // Build mockShard to return replicaShard directory so that empty metadata file is returned. + // Build mockShard to return replicaShard directory so that an empty metadata file is returned. 
buildIndexShardBehavior(mockShard, replicaShard); replicationSource = new RemoteStoreReplicationSource(mockShard); - // Mock replica shard state to RECOVERING so that getCheckpointInfo return empty map + // For a RECOVERING shard, the response should have an empty metadata map. final ReplicationCheckpoint checkpoint = replicaShard.getLatestReplicationCheckpoint(); final PlainActionFuture res = PlainActionFuture.newFuture(); when(mockShard.state()).thenReturn(IndexShardState.RECOVERING); - replicationSource = new RemoteStoreReplicationSource(mockShard); - // Recovering shard should just do a noop and return empty metadata map. replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res); CheckpointInfoResponse response = res.get(); - assert (response.getCheckpoint().equals(checkpoint)); - assert (response.getMetadataMap().isEmpty()); + assertTrue(response.getCheckpoint().equals(checkpoint)); + assertTrue(response.getMetadataMap().isEmpty()); - // Started shard should fail with assertion error. + // For a STARTED shard, the new behavior needs mock routing entry when(mockShard.state()).thenReturn(IndexShardState.STARTED); - expectThrows(AssertionError.class, () -> { - final PlainActionFuture res2 = PlainActionFuture.newFuture(); - replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res2); - }); + // Mock a routing entry for the search-only condition + ShardRouting mockRouting = mock(ShardRouting.class); + when(mockRouting.isSearchOnly()).thenReturn(true); // Make it a search-only replica + when(mockShard.routingEntry()).thenReturn(mockRouting); + + // Ensure the mock returns the expected checkpoint when getLatestReplicationCheckpoint is called. + when(mockShard.getLatestReplicationCheckpoint()).thenReturn(replicaShard.getLatestReplicationCheckpoint()); + final PlainActionFuture res2 = PlainActionFuture.newFuture(); + replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res2); + CheckpointInfoResponse response2 = res2.get(); + assertTrue(response2.getCheckpoint().equals(replicaShard.getLatestReplicationCheckpoint())); + assertTrue(response2.getMetadataMap().isEmpty()); + + // Additional test for non-search-only replica (should fail with exception) + when(mockRouting.isSearchOnly()).thenReturn(false); + final PlainActionFuture res3 = PlainActionFuture.newFuture(); + replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res3); + ExecutionException exception = assertThrows(ExecutionException.class, () -> res3.get()); + assertTrue(exception.getCause() instanceof IllegalStateException); + assertTrue(exception.getCause().getMessage().contains("Remote metadata file can't be null if shard is active")); } private void buildIndexShardBehavior(IndexShard mockShard, IndexShard indexShard) { diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestScaleIndexActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestScaleIndexActionTests.java new file mode 100644 index 0000000000000..c8f5921f8b0ab --- /dev/null +++ b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestScaleIndexActionTests.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.admin.indices; + +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.transport.client.node.NodeClient; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; + +public class RestScaleIndexActionTests extends OpenSearchTestCase { + + private RestScaleIndexAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestScaleIndexAction(); + } + + public void testMissingIndexParam() { + // Build a fake request with no "index" parameter + Map params = new HashMap<>(); + FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(restRequest, mock(NodeClient.class)) + ); + assertThat(e.getMessage(), containsString("index is required")); + } + + public void testEmptyIndexParam() { + // Build a fake request with an empty "index" parameter + Map params = new HashMap<>(); + params.put("index", " "); + FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(restRequest, mock(NodeClient.class)) + ); + assertThat(e.getMessage(), containsString("index is required")); + } + + public void testUnknownParameterInBody() { + String json = "{\"unknown\": \"value\"}"; + Map params = new HashMap<>(); + params.put("index", "test-index"); + FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .withContent(new BytesArray(json), XContentType.JSON) + .build(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(restRequest, mock(NodeClient.class)) + ); + assertThat(e.getMessage(), containsString("Unknown parameter [unknown]. 
Only [search_only] is allowed.")); + } + + public void testEmptyBody() { + String json = "{}"; + Map params = new HashMap<>(); + params.put("index", "test-index"); + FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .withContent(new BytesArray(json), XContentType.JSON) + .build(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(restRequest, mock(NodeClient.class)) + ); + assertThat(e.getMessage(), containsString("Parameter [search_only] is required")); + } + + public void testInvalidSearchOnlyType() { + String json = "{\"search_only\": \"not_a_boolean\"}"; + Map params = new HashMap<>(); + params.put("index", "test-index"); + FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .withContent(new BytesArray(json), XContentType.JSON) + .build(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(restRequest, mock(NodeClient.class)) + ); + assertThat(e.getMessage(), containsString("Parameter [search_only] must be a boolean (true or false)")); + } + + public void testValidRequestWithSearchOnlyTrue() throws Exception { + String json = "{\"search_only\": true}"; + Map params = new HashMap<>(); + params.put("index", "test-index"); + FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .withContent(new BytesArray(json), XContentType.JSON) + .build(); + Object consumer = action.prepareRequest(restRequest, mock(NodeClient.class)); + assertThat(consumer, notNullValue()); + } + + public void testValidRequestWithSearchOnlyFalse() throws Exception { + String json = "{\"search_only\": false}"; + Map params = new HashMap<>(); + params.put("index", "test-index"); + FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .withContent(new BytesArray(json), XContentType.JSON) + .build(); + Object consumer = action.prepareRequest(restRequest, mock(NodeClient.class)); + assertThat(consumer, notNullValue()); + } + + public void testInvalidJson() { + String json = "{\"search_only\": fa}"; // Invalid JSON + Map params = new HashMap<>(); + params.put("index", "test-index"); + FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .withContent(new BytesArray(json), XContentType.JSON) + .build(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(restRequest, mock(NodeClient.class)) + ); + assertThat(e.getMessage(), containsString("Request body must be valid JSON")); + } + + public void testParseScaleDownValueMultipleFields() { + String json = "{\"search_only\": true, \"unknown_field\": \"value\"}"; + Map params = new HashMap<>(); + params.put("index", "test-index"); + FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .withContent(new BytesArray(json), XContentType.JSON) + .build(); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(restRequest, mock(NodeClient.class)) + ); + assertThat(e.getMessage(), containsString("Unknown parameter [unknown_field]. 
Only [search_only] is allowed.")); + } +} From cec8fb967dfbe33a8e5f39be5a2a36b7aecc1246 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Sun, 6 Apr 2025 23:08:31 -0400 Subject: [PATCH 147/550] Remove dblock@ from core maintainers. (#17805) Signed-off-by: Daniel (dB.) Doubrovkine --- .github/CODEOWNERS | 20 ++++++------ MAINTAINERS.md | 80 +++++++++++++++++++++++----------------------- 2 files changed, 50 insertions(+), 50 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5915365677ca2..0c770d62a45ae 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,27 +11,27 @@ # 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file. # Default ownership for all repo files -* @anasalkouz @andrross @ashking94 @bugmakerrrrrr @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +* @anasalkouz @andrross @ashking94 @bugmakerrrrrr @Bukhtawar @CEHENKLE @cwperks @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah -/modules/lang-painless/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah -/modules/parent-join/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/modules/lang-painless/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/modules/parent-join/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /modules/transport-netty4/ @peternied /plugins/identity-shiro/ @peternied @cwperks -/server/src/internalClusterTest/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah -/server/src/internalClusterTest/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/internalClusterTest/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/internalClusterTest/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah 
/server/src/main/java/org/opensearch/extensions/ @peternied /server/src/main/java/org/opensearch/identity/ @peternied @cwperks -/server/src/main/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah -/server/src/main/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/main/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/main/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /server/src/main/java/org/opensearch/threadpool/ @jed326 @peternied /server/src/main/java/org/opensearch/transport/ @peternied -/server/src/test/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah -/server/src/test/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/test/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/test/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /.github/ @jed326 @peternied -/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 7906596f047d5..bcb3b0164db49 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -4,46 +4,46 @@ This document contains a list of maintainers in this repo. 
See [opensearch-proje ## Current Maintainers -| Maintainer | GitHub ID | Affiliation | -| ------------------------ | ------------------------------------------------------- | ----------- | -| Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | -| Andrew Ross | [andrross](https://github.com/andrross) | Amazon | -| Andriy Redko | [reta](https://github.com/reta) | Independent | -| Ankit Jain | [jainankitk](https://github.com/jainankitk) | Amazon | -| Ashish Singh | [ashking94](https://github.com/ashking94) | Amazon | -| Bukhtawar Khan | [Bukhtawar](https://github.com/Bukhtawar) | Amazon | -| Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon | -| Craig Perkins | [cwperks](https://github.com/cwperks) | Amazon | -| Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon | -| Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Independent | -| Binlong Gao | [gaobinlong](https://github.com/gaobinlong) | Amazon | -| Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon | -| Jay Deng | [jed326](https://github.com/jed326) | Amazon | -| Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon | -| Varun Bansal | [linuxpi](https://github.com/linuxpi) | Amazon | -| Marc Handalian | [mch2](https://github.com/mch2) | Amazon | -| Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | -| Nick Knize | [nknize](https://github.com/nknize) | Lucenia | -| Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | -| Pan Guixin | [bugmakerrrrrr](https://github.com/bugmakerrrrrr) | ByteDance | -| Peter Nied | [peternied](https://github.com/peternied) | Amazon | -| Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon | -| Sachin Kale | [sachinpkale](https://github.com/sachinpkale) | Amazon | -| Sarat Vemulapalli | [saratvemulapalli](https://github.com/saratvemulapalli) | Amazon | -| Shweta Thareja | [shwetathareja](https://github.com/shwetathareja) | Amazon | -| Sorabh Hamirwasia | [sohami](https://github.com/sohami) | Amazon | -| Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon | +| Maintainer | GitHub ID | Affiliation | +| ----------------- | ------------------------------------------------------- | ----------- | +| Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | +| Andrew Ross | [andrross](https://github.com/andrross) | Amazon | +| Andriy Redko | [reta](https://github.com/reta) | Independent | +| Ankit Jain | [jainankitk](https://github.com/jainankitk) | Amazon | +| Ashish Singh | [ashking94](https://github.com/ashking94) | Amazon | +| Bukhtawar Khan | [Bukhtawar](https://github.com/Bukhtawar) | Amazon | +| Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon | +| Craig Perkins | [cwperks](https://github.com/cwperks) | Amazon | +| Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon | +| Binlong Gao | [gaobinlong](https://github.com/gaobinlong) | Amazon | +| Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon | +| Jay Deng | [jed326](https://github.com/jed326) | Amazon | +| Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon | +| Varun Bansal | [linuxpi](https://github.com/linuxpi) | Amazon | +| Marc Handalian | [mch2](https://github.com/mch2) | Amazon | +| Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | +| Nick Knize | [nknize](https://github.com/nknize) | Lucenia | +| Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | +| Pan Guixin | 
[bugmakerrrrrr](https://github.com/bugmakerrrrrr) | ByteDance | +| Peter Nied | [peternied](https://github.com/peternied) | Amazon | +| Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon | +| Sachin Kale | [sachinpkale](https://github.com/sachinpkale) | Amazon | +| Sarat Vemulapalli | [saratvemulapalli](https://github.com/saratvemulapalli) | Amazon | +| Shweta Thareja | [shwetathareja](https://github.com/shwetathareja) | Amazon | +| Sorabh Hamirwasia | [sohami](https://github.com/sohami) | Amazon | +| Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon | ## Emeritus -| Maintainer | GitHub ID | Affiliation | -| --------------------- | ------------------------------------------- | ----------- | -| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | -| Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | -| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | -| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta | -| Himanshu Setia | [setiah](https://github.com/setiah) | Amazon | -| Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | -| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent | -| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | -| Suraj Singh | [dreamer-89](https://github.com/dreamer-89) | Amazon | +| Maintainer | GitHub ID | Affiliation | +| ------------------------ | ------------------------------------------- | ----------- | +| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | +| Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | +| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | +| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta | +| Himanshu Setia | [setiah](https://github.com/setiah) | Amazon | +| Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | +| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent | +| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | +| Suraj Singh | [dreamer-89](https://github.com/dreamer-89) | Amazon | +| Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Independent | From ad54bd21469a0af6cffea92eb2907c94b5ed7a7f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 13:12:39 -0400 Subject: [PATCH 148/550] Bump com.google.api.grpc:proto-google-iam-v1 from 1.33.0 to 1.49.1 in /plugins/repository-gcs (#17811) * Bump com.google.api.grpc:proto-google-iam-v1 in /plugins/repository-gcs Bumps [com.google.api.grpc:proto-google-iam-v1](https://github.com/googleapis/sdk-platform-java) from 1.33.0 to 1.49.1. - [Release notes](https://github.com/googleapis/sdk-platform-java/releases) - [Changelog](https://github.com/googleapis/sdk-platform-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/sdk-platform-java/commits) --- updated-dependencies: - dependency-name: com.google.api.grpc:proto-google-iam-v1 dependency-version: 1.49.1 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 2 +- .../repository-gcs/licenses/proto-google-iam-v1-1.33.0.jar.sha1 | 1 - .../repository-gcs/licenses/proto-google-iam-v1-1.49.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/proto-google-iam-v1-1.33.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/proto-google-iam-v1-1.49.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 08f839aa55dbb..ab9110e06755a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.api.grpc:proto-google-common-protos` from 2.52.0 to 2.54.1 ([#17733](https://github.com/opensearch-project/OpenSearch/pull/17733)) - Bump `ch.qos.logback:logback-classic` from 1.5.17 to 1.5.18 ([#17730](https://github.com/opensearch-project/OpenSearch/pull/17730)) - Bump `reactor_netty` from 1.1.26 to 1.2.3 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322), [#17377](https://github.com/opensearch-project/OpenSearch/pull/17377)) +- Bump `com.google.api.grpc:proto-google-iam-v1` from 1.33.0 to 1.49.1 ([#17811](https://github.com/opensearch-project/OpenSearch/pull/17811)) ### Changed diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 6d9b9635fa39c..d6352ef99f987 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -57,7 +57,7 @@ dependencies { api 'com.google.api-client:google-api-client:2.7.0' api 'com.google.api.grpc:proto-google-common-protos:2.54.1' - api 'com.google.api.grpc:proto-google-iam-v1:1.33.0' + api 'com.google.api.grpc:proto-google-iam-v1:1.49.1' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" diff --git a/plugins/repository-gcs/licenses/proto-google-iam-v1-1.33.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-iam-v1-1.33.0.jar.sha1 deleted file mode 100644 index ba04056c54697..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-iam-v1-1.33.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4766da92d1f36c8b612c1c142d5f3ace3774f098 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-iam-v1-1.49.1.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-iam-v1-1.49.1.jar.sha1 new file mode 100644 index 0000000000000..242da16cddf42 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-iam-v1-1.49.1.jar.sha1 @@ -0,0 +1 @@ +3340df39c56ae913b068f17818bf016a4b4c4177 \ No newline at end of file From 86a2000c8427f6c062aef78fd0101f2814d1f1d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 13:12:53 -0400 Subject: [PATCH 149/550] Bump tj-actions/changed-files from 46.0.3 to 46.0.4 (#17813) * Bump tj-actions/changed-files from 46.0.3 to 46.0.4 Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 46.0.3 to 46.0.4. 
- [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v46.0.3...v46.0.4) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-version: 46.0.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/gradle-check.yml | 2 +- CHANGELOG.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 99e2c6a8965c5..8ffe395370804 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -24,7 +24,7 @@ jobs: - uses: actions/checkout@v4 - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v46.0.3 + uses: tj-actions/changed-files@v46.0.4 with: files_ignore: | release-notes/*.md diff --git a/CHANGELOG.md b/CHANGELOG.md index ab9110e06755a..ca85f39e91c8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,7 +38,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `dangoslen/dependabot-changelog-helper` from 3 to 4 ([#17498](https://github.com/opensearch-project/OpenSearch/pull/17498)) - Bump `com.google.api:gax` from 2.35.0 to 2.63.1 ([#17465](https://github.com/opensearch-project/OpenSearch/pull/17465)) - Bump `com.azure:azure-storage-blob` from 12.29.1 to 12.30.0 ([#17667](https://github.com/opensearch-project/OpenSearch/pull/17667)) -- Bump `tj-actions/changed-files` from 46.0.1 to 46.0.3 ([#17666](https://github.com/opensearch-project/OpenSearch/pull/17666)) +- Bump `tj-actions/changed-files` from 46.0.1 to 46.0.4 ([#17666](https://github.com/opensearch-project/OpenSearch/pull/17666), [#17813](https://github.com/opensearch-project/OpenSearch/pull/17813)) - Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17668](https://github.com/opensearch-project/OpenSearch/pull/17668)) - Bump `com.github.luben:zstd-jni` from 1.5.5-1 to 1.5.6-1 ([#17674](https://github.com/opensearch-project/OpenSearch/pull/17674)) - Bump `lycheeverse/lychee-action` from 2.3.0 to 2.4.0 ([#17731](https://github.com/opensearch-project/OpenSearch/pull/17731)) From b0bfdc735f871b56e315adac0607f659ef1e9ca0 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 7 Apr 2025 13:58:48 -0400 Subject: [PATCH 150/550] Fix FileInterceptor to deduce the access level from the list of OpenOption (#17816) Signed-off-by: Andriy Redko --- .../opensearch/javaagent/FileInterceptor.java | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java index 605aa5a7d31df..823b4e4fe0726 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java @@ -12,8 +12,10 @@ import java.io.FilePermission; import java.lang.reflect.Method; +import java.nio.file.OpenOption; import java.nio.file.Path; import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; import java.security.Policy; import java.security.ProtectionDomain; 
import java.util.Collection; @@ -59,13 +61,21 @@ public static void intercept(@Advice.AllArguments Object[] args, @Advice.Origin final Collection callers = walker.walk(StackCallerProtectionDomainChainExtractor.INSTANCE); final String name = method.getName(); - final boolean isMutating = name.equals("copy") - || name.equals("move") - || name.equals("write") - || name.equals("newByteChannel") - || name.startsWith("create"); + boolean isMutating = name.equals("copy") || name.equals("move") || name.equals("write") || name.startsWith("create"); final boolean isDelete = isMutating == false ? name.startsWith("delete") : false; + if (isMutating == false && isDelete == false && name.equals("newByteChannel") == true) { + if (args.length > 1 && args[1] instanceof OpenOption[] opts) { + for (final OpenOption opt : opts) { + if (opt != StandardOpenOption.READ) { + isMutating = true; + break; + } + } + + } + } + // Check each permission separately for (final ProtectionDomain domain : callers) { // Handle FileChannel.open() separately to check read/write permissions properly From 115de22102b68e62c0d6f818c4e083c59008c72a Mon Sep 17 00:00:00 2001 From: Gulshan <71965388+kumargu@users.noreply.github.com> Date: Tue, 8 Apr 2025 00:26:26 +0530 Subject: [PATCH 151/550] Add a policy parser for Java Agent (#17753) * Add a policy parser for java agent Signed-off-by: Gulshan * Url no depricated version of url resolution Signed-off-by: Gulshan * Remove unused methods and switch to modern Java collections Signed-off-by: Gulshan * Use record classes and other small refactorings Signed-off-by: Andrew Ross --------- Signed-off-by: Gulshan Signed-off-by: Andrew Ross Co-authored-by: Andrew Ross --- CHANGELOG.md | 1 + gradle/missing-javadoc.gradle | 1 + libs/agent-sm/agent-policy/build.gradle | 27 ++ .../secure_sm/policy/GrantEntry.java | 13 + .../secure_sm/policy/PermissionEntry.java | 11 + .../secure_sm/policy/PolicyFile.java | 320 ++++++++++++++++++ .../policy/PolicyInitializationException.java | 27 ++ .../secure_sm/policy/PolicyParser.java | 199 +++++++++++ .../secure_sm/policy/PropertyExpander.java | 82 +++++ .../opensearch/secure_sm/policy/Token.java | 12 + .../secure_sm/policy/TokenStream.java | 54 +++ .../secure_sm/policy/Tokenizer.java | 60 ++++ .../secure_sm/policy/package-info.java | 12 + .../secure_sm/policy/PolicyParserTests.java | 61 ++++ 14 files changed, 880 insertions(+) create mode 100644 libs/agent-sm/agent-policy/build.gradle create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/GrantEntry.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PermissionEntry.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyInitializationException.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Token.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/TokenStream.java create mode 100644 libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Tokenizer.java create mode 100644 
libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java create mode 100644 libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index ca85f39e91c8c..1f2adbf9d3e4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Security Manager Replacement] Create initial Java Agent to intercept Socket::connect calls ([#17724](https://github.com/opensearch-project/OpenSearch/pull/17724)) - Add ingestion management APIs for pause, resume and get ingestion state ([#17631](https://github.com/opensearch-project/OpenSearch/pull/17631)) - [Security Manager Replacement] Enhance Java Agent to intercept System::exit ([#17746](https://github.com/opensearch-project/OpenSearch/pull/17746)) +- [Security Manager Replacement] Add a policy parser for Java agent security policies ([#17753](https://github.com/opensearch-project/OpenSearch/pull/17753)) - [Security Manager Replacement] Implement File Interceptor and add integration tests ([#17760](https://github.com/opensearch-project/OpenSearch/pull/17760)) - [Security Manager Replacement] Enhance Java Agent to intercept Runtime::halt ([#17757](https://github.com/opensearch-project/OpenSearch/pull/17757)) - Support AutoExpand for SearchReplica ([#17741](https://github.com/opensearch-project/OpenSearch/pull/17741)) diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 6e31f838e678a..9f27dc5cadcd2 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -106,6 +106,7 @@ configure([ project(":libs:opensearch-secure-sm"), project(":libs:opensearch-ssl-config"), project(":libs:opensearch-x-content"), + project(":libs:agent-sm:agent-policy"), project(":modules:aggs-matrix-stats"), project(":modules:analysis-common"), project(":modules:geo"), diff --git a/libs/agent-sm/agent-policy/build.gradle b/libs/agent-sm/agent-policy/build.gradle new file mode 100644 index 0000000000000..a44c2c1349909 --- /dev/null +++ b/libs/agent-sm/agent-policy/build.gradle @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +apply plugin: 'opensearch.build' +apply plugin: 'opensearch.publish' + +ext { + failOnJavadocWarning = false +} + +base { + archivesName = 'opensearch-agent-policy' +} + +disableTasks('forbiddenApisMain') + +dependencies { + testImplementation(project(":test:framework")) +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/GrantEntry.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/GrantEntry.java new file mode 100644 index 0000000000000..00d3f7b02f7c0 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/GrantEntry.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.secure_sm.policy; + +import java.util.List; + +public record GrantEntry(String codeBase, List permissionEntries) { +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PermissionEntry.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PermissionEntry.java new file mode 100644 index 0000000000000..a173aef26ff78 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PermissionEntry.java @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.secure_sm.policy; + +public record PermissionEntry(String permission, String name, String action) { +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java new file mode 100644 index 0000000000000..183ca5222b017 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java @@ -0,0 +1,320 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm.policy; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FilePermission; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.MalformedURLException; +import java.net.NetPermission; +import java.net.SocketPermission; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.security.AllPermission; +import java.security.CodeSource; +import java.security.Permission; +import java.security.PermissionCollection; +import java.security.Permissions; +import java.security.ProtectionDomain; +import java.security.SecurityPermission; +import java.security.cert.Certificate; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.List; +import java.util.Optional; +import java.util.PropertyPermission; +import java.util.Set; + +@SuppressWarnings("removal") +public class PolicyFile extends java.security.Policy { + public static final Set PERM_CLASSES_TO_SKIP = Set.of( + "org.opensearch.secure_sm.ThreadContextPermission", + "org.opensearch.secure_sm.ThreadPermission", + "org.opensearch.SpecialPermission", + "org.bouncycastle.crypto.CryptoServicesPermission", + "org.opensearch.script.ClassPermission", + "javax.security.auth.AuthPermission", + "javax.security.auth.kerberos.ServicePermission" + ); + + private final PolicyInfo policyInfo; + private final URL url; + + public PolicyFile(URL url) { + this.url = url; + try { + policyInfo = init(url); + } catch (PolicyInitializationException e) { + throw new RuntimeException("Failed to initialize policy file", e); + } + } + + private PolicyInfo init(URL policy) throws PolicyInitializationException { + PolicyInfo info = new PolicyInfo(); + try (InputStreamReader reader = new InputStreamReader(getInputStream(policy), StandardCharsets.UTF_8)) { + List grantEntries = PolicyParser.read(reader); + for (GrantEntry grantEntry : grantEntries) { + addGrantEntry(grantEntry, info); + } + } catch 
(Exception e) { + throw new PolicyInitializationException("Failed to load policy from: " + policy, e); + } + return info; + } + + public static InputStream getInputStream(URL url) throws IOException { + if ("file".equals(url.getProtocol())) { + String path = url.getFile().replace('/', File.separatorChar); + path = URLDecoder.decode(path, StandardCharsets.UTF_8); + return new FileInputStream(path); + } else { + return url.openStream(); + } + } + + private CodeSource getCodeSource(GrantEntry grantEntry) throws PolicyInitializationException { + try { + Certificate[] certs = null; + URL location = (grantEntry.codeBase() != null) ? newURL(grantEntry.codeBase()) : null; + return canonicalizeCodebase(new CodeSource(location, certs)); + } catch (Exception e) { + throw new PolicyInitializationException("Failed to get CodeSource", e); + } + } + + private void addGrantEntry(GrantEntry grantEntry, PolicyInfo newInfo) throws PolicyInitializationException { + CodeSource codesource = getCodeSource(grantEntry); + if (codesource == null) { + throw new PolicyInitializationException("Null CodeSource for: " + grantEntry.codeBase()); + } + + List permissions = new ArrayList<>(); + List permissionList = grantEntry.permissionEntries(); + for (PermissionEntry pe : permissionList) { + final PermissionEntry expandedEntry = expandPermissionName(pe); + try { + Optional perm = getInstance(expandedEntry.permission(), expandedEntry.name(), expandedEntry.action()); + if (perm.isPresent()) { + permissions.add(perm.get()); + } + } catch (ClassNotFoundException e) { + // these were mostly custom permission classes added for security + // manager. Since security manager is deprecated, we can skip these + // permissions classes. + if (PERM_CLASSES_TO_SKIP.contains(pe.permission())) { + continue; // skip this permission + } + throw new PolicyInitializationException("Permission class not found: " + pe.permission(), e); + } + } + newInfo.policyEntries.add(new PolicyEntry(codesource, permissions)); + } + + private static PermissionEntry expandPermissionName(PermissionEntry pe) { + if (pe.name() == null || !pe.name().contains("${{")) { + return pe; + } + + int startIndex = 0; + int b, e; + StringBuilder sb = new StringBuilder(); + + while ((b = pe.name().indexOf("${{", startIndex)) != -1 && (e = pe.name().indexOf("}}", b)) != -1) { + sb.append(pe.name(), startIndex, b); + String value = pe.name().substring(b + 3, e); + sb.append("${{").append(value).append("}}"); + startIndex = e + 2; + } + + sb.append(pe.name().substring(startIndex)); + return new PermissionEntry(pe.permission(), sb.toString(), pe.action()); + } + + private static final Optional getInstance(String type, String name, String actions) throws ClassNotFoundException { + Class pc = Class.forName(type, false, null); + Permission answer = getKnownPermission(pc, name, actions); + + return Optional.ofNullable(answer); + } + + private static Permission getKnownPermission(Class claz, String name, String actions) { + if (claz.equals(FilePermission.class)) { + return new FilePermission(name, actions); + } else if (claz.equals(SocketPermission.class)) { + return new SocketPermission(name, actions); + } else if (claz.equals(RuntimePermission.class)) { + return new RuntimePermission(name, actions); + } else if (claz.equals(PropertyPermission.class)) { + return new PropertyPermission(name, actions); + } else if (claz.equals(NetPermission.class)) { + return new NetPermission(name, actions); + } else if (claz.equals(AllPermission.class)) { + return new AllPermission(); + } else if 
(claz.equals(SecurityPermission.class)) { + return new SecurityPermission(name, actions); + } else { + return null; + } + } + + @Override + public void refresh() { + try { + init(url); + } catch (PolicyInitializationException e) { + throw new RuntimeException("Failed to refresh policy", e); + } + } + + @Override + public boolean implies(ProtectionDomain pd, Permission p) { + PermissionCollection pc = getPermissions(pd); + return pc != null && pc.implies(p); + } + + @Override + public PermissionCollection getPermissions(ProtectionDomain domain) { + Permissions perms = new Permissions(); + if (domain == null) return perms; + + try { + getPermissionsForProtectionDomain(perms, domain); + } catch (PolicyInitializationException e) { + throw new RuntimeException("Failed to get permissions for domain", e); + } + + PermissionCollection pc = domain.getPermissions(); + if (pc != null) { + synchronized (pc) { + Enumeration e = pc.elements(); + while (e.hasMoreElements()) { + perms.add(e.nextElement()); + } + } + } + + return perms; + } + + @Override + public PermissionCollection getPermissions(CodeSource codesource) { + if (codesource == null) return new Permissions(); + + Permissions perms = new Permissions(); + CodeSource canonicalCodeSource; + + try { + canonicalCodeSource = canonicalizeCodebase(codesource); + } catch (PolicyInitializationException e) { + throw new RuntimeException("Failed to canonicalize CodeSource", e); + } + + for (PolicyEntry entry : policyInfo.policyEntries) { + if (entry.codeSource().implies(canonicalCodeSource)) { + for (Permission permission : entry.permissions) { + perms.add(permission); + } + } + } + + return perms; + } + + private void getPermissionsForProtectionDomain(Permissions perms, ProtectionDomain pd) throws PolicyInitializationException { + final CodeSource cs = pd.getCodeSource(); + if (cs == null) return; + + CodeSource canonicalCodeSource = canonicalizeCodebase(cs); + + for (PolicyEntry entry : policyInfo.policyEntries) { + if (entry.codeSource().implies(canonicalCodeSource)) { + for (Permission permission : entry.permissions) { + perms.add(permission); + } + } + } + } + + private CodeSource canonicalizeCodebase(CodeSource cs) throws PolicyInitializationException { + URL location = cs.getLocation(); + if (location == null) return cs; + + try { + URL canonicalUrl = canonicalizeUrl(location); + return new CodeSource(canonicalUrl, cs.getCertificates()); + } catch (IOException e) { + throw new PolicyInitializationException("Failed to canonicalize CodeSource", e); + } + } + + @SuppressWarnings("deprecation") + private URL canonicalizeUrl(URL url) throws IOException { + String protocol = url.getProtocol(); + + if ("jar".equals(protocol)) { + String spec = url.getFile(); + int separator = spec.indexOf("!/"); + if (separator != -1) { + try { + url = new URL(spec.substring(0, separator)); + } catch (MalformedURLException e) { + throw new IOException("Malformed nested jar URL", e); + } + } + } + + if ("file".equals(url.getProtocol())) { + String path = url.getPath(); + path = canonicalizePath(path); + return new File(path).toURI().toURL(); + } + + return url; + } + + private String canonicalizePath(String path) throws IOException { + if (path.endsWith("*")) { + path = path.substring(0, path.length() - 1); + return new File(path).getCanonicalPath() + "*"; + } else { + return new File(path).getCanonicalPath(); + } + } + + private record PolicyEntry(CodeSource codeSource, List permissions) { + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); 
+ sb.append("{").append(codeSource).append("\n"); + for (Permission p : permissions) { + sb.append(" ").append(p).append("\n"); + } + sb.append("}\n"); + return sb.toString(); + } + } + + private static class PolicyInfo { + final List policyEntries; + + PolicyInfo() { + policyEntries = new ArrayList<>(); + } + } + + private static URL newURL(String spec) throws MalformedURLException, URISyntaxException { + return new URI(spec).toURL(); + } +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyInitializationException.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyInitializationException.java new file mode 100644 index 0000000000000..9205c0aecec41 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyInitializationException.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm.policy; + +/** + * Custom exception for failures during policy file parsing, + */ +public class PolicyInitializationException extends Exception { + + public PolicyInitializationException(String message) { + super(message); + } + + public PolicyInitializationException(String message, Throwable cause) { + super(message, cause); + } + + public PolicyInitializationException(Throwable cause) { + super(cause); + } +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java new file mode 100644 index 0000000000000..8d9eeddd15c7f --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyParser.java @@ -0,0 +1,199 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.secure_sm.policy; + +import org.opensearch.secure_sm.policy.PropertyExpander.ExpandException; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.Reader; +import java.io.StreamTokenizer; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +public class PolicyParser { + + private PolicyParser() {} + + public static List read(Reader policy) throws ParsingException, IOException { + final List grantEntries = new ArrayList<>(); + if (!(policy instanceof BufferedReader)) { + policy = new BufferedReader(policy); + } + TokenStream tokenStream = new TokenStream(policy); + while (!tokenStream.isEOF()) { + if (peek(tokenStream, "grant")) { + parseGrantEntry(tokenStream).ifPresent(grantEntries::add); + } + } + return grantEntries; + } + + private static boolean pollOnMatch(TokenStream tokenStream, String expect) throws ParsingException, IOException { + if (peek(tokenStream, expect)) { + poll(tokenStream, expect); + return true; + } + return false; + } + + private static boolean peek(TokenStream tokenStream, String expected) throws IOException { + Token token = tokenStream.peek(); + return expected.equalsIgnoreCase(token.text()); + } + + private static String poll(TokenStream tokenStream, String expected) throws IOException, ParsingException { + Token token = tokenStream.consume(); + + // Match exact keyword or symbol + if (expected.equalsIgnoreCase("grant") + || expected.equalsIgnoreCase("Codebase") + || expected.equalsIgnoreCase("Permission") + || expected.equalsIgnoreCase("{") + || expected.equalsIgnoreCase("}") + || expected.equalsIgnoreCase(";") + || expected.equalsIgnoreCase(",")) { + + if (!expected.equalsIgnoreCase(token.text())) { + throw new ParsingException(token.line(), expected, token.text()); + } + return token.text(); + } + + if (token.type() == StreamTokenizer.TT_WORD || token.type() == '"' || token.type() == '\'') { + return token.text(); + } + + throw new ParsingException(token.line(), expected, token.text()); + } + + private static Optional parseGrantEntry(TokenStream tokenStream) throws ParsingException, IOException { + String codeBase = null; + List permissionEntries = new ArrayList<>(); + + poll(tokenStream, "grant"); + + while (!peek(tokenStream, "{")) { + if (pollOnMatch(tokenStream, "Codebase")) { + if (codeBase != null) { + throw new ParsingException(tokenStream.line(), "Multiple Codebase expressions"); + } + + String rawCodebase = poll(tokenStream, tokenStream.peek().text()); + try { + codeBase = PropertyExpander.expand(rawCodebase, true).replace(File.separatorChar, '/'); + } catch (ExpandException e) { + // skip this grant as expansion failed due to missing expansion property. 
+ skipCurrentGrantBlock(tokenStream); + + return Optional.empty(); + } + pollOnMatch(tokenStream, ","); + } else { + throw new ParsingException(tokenStream.line(), "Expected codeBase"); + } + } + + poll(tokenStream, "{"); + + while (!peek(tokenStream, "}")) { + if (peek(tokenStream, "Permission")) { + permissionEntries.add(parsePermissionEntry(tokenStream)); + poll(tokenStream, ";"); + } else { + throw new ParsingException(tokenStream.line(), "Expected permission entry"); + } + } + + poll(tokenStream, "}"); + + if (peek(tokenStream, ";")) { + poll(tokenStream, ";"); + } + + if (codeBase != null) { + codeBase = codeBase.replace(File.separatorChar, '/'); + } + + return Optional.of(new GrantEntry(codeBase, permissionEntries)); + } + + private static void skipCurrentGrantBlock(TokenStream tokenStream) throws IOException, ParsingException { + // Consume until we find a matching closing '}' + int braceDepth = 0; + + // Go until we find the initial '{' + while (!tokenStream.isEOF()) { + Token token = tokenStream.peek(); + if ("{".equals(token.text())) { + braceDepth++; + tokenStream.consume(); + break; + } + tokenStream.consume(); + } + + // Now consume until matching '}' + while (braceDepth > 0 && !tokenStream.isEOF()) { + Token token = tokenStream.consume(); + if ("{".equals(token.text())) { + braceDepth++; + } else if ("}".equals(token.text())) { + braceDepth--; + } + } + + // Consume optional trailing semicolon + if (peek(tokenStream, ";")) { + poll(tokenStream, ";"); + } + } + + private static PermissionEntry parsePermissionEntry(TokenStream tokenStream) throws ParsingException, IOException { + String name = null; + String action = null; + + poll(tokenStream, "Permission"); + final String permission = poll(tokenStream, tokenStream.peek().text()); + + if (isQuotedToken(tokenStream.peek())) { + name = poll(tokenStream, tokenStream.peek().text()); + } + + if (peek(tokenStream, ",")) { + poll(tokenStream, ","); + } + + if (isQuotedToken(tokenStream.peek())) { + action = poll(tokenStream, tokenStream.peek().text()); + } + + return new PermissionEntry(permission, name, action); + } + + private static boolean isQuotedToken(Token token) { + return token.type() == '"' || token.type() == '\''; + } + + public static class ParsingException extends Exception { + public ParsingException(String message) { + super(message); + } + + public ParsingException(int line, String expected) { + super("line " + line + ": expected [" + expected + "]"); + } + + public ParsingException(int line, String expected, String found) { + super("line " + line + ": expected [" + expected + "], found [" + found + "]"); + } + } +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java new file mode 100644 index 0000000000000..757062d46f226 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PropertyExpander.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.secure_sm.policy; + +import java.io.File; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.security.GeneralSecurityException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class PropertyExpander { + + private static final Pattern PLACEHOLDER_PATTERN = Pattern.compile("\\$\\{\\{(?.*?)}}|\\$\\{(?.*?)}"); + + public static class ExpandException extends GeneralSecurityException { + private static final long serialVersionUID = -1L; + + public ExpandException(String message) { + super(message); + } + } + + public static String expand(String value) throws ExpandException { + return expand(value, false); + } + + public static String expand(String value, boolean encodeURL) throws ExpandException { + if (value == null || !value.contains("${")) { + return value; + } + + Matcher matcher = PLACEHOLDER_PATTERN.matcher(value); + StringBuffer sb = new StringBuffer(); + while (matcher.find()) { + String replacement = handleMatch(matcher, encodeURL); + matcher.appendReplacement(sb, Matcher.quoteReplacement(replacement)); + } + matcher.appendTail(sb); + return sb.toString(); + } + + private static String handleMatch(Matcher match, boolean encodeURL) throws ExpandException { + String escaped = match.group("escaped"); + if (escaped != null) { + return "${{" + escaped + "}}"; + } + + String placeholder = match.group("normal"); + return expandPlaceholder(placeholder, encodeURL); + } + + private static String expandPlaceholder(String placeholder, boolean encodeURL) throws ExpandException { + return switch (placeholder) { + case "/" -> String.valueOf(File.separatorChar); + default -> { + String value = System.getProperty(placeholder); + if (value == null) { + throw new ExpandException("Unable to expand property: " + placeholder); + } + yield encodeURL ? encodeValue(value) : value; + } + }; + } + + private static String encodeValue(String value) { + try { + URI uri = new URI(value); + return uri.isAbsolute() ? value : URLEncoder.encode(value, StandardCharsets.UTF_8); + } catch (URISyntaxException e) { + return URLEncoder.encode(value, StandardCharsets.UTF_8); + } + } +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Token.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Token.java new file mode 100644 index 0000000000000..fafe2a1089352 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Token.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm.policy; + +public record Token(int type, String text, int line) { +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/TokenStream.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/TokenStream.java new file mode 100644 index 0000000000000..885e35d3daa7c --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/TokenStream.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.secure_sm.policy; + +import java.io.IOException; +import java.io.Reader; +import java.io.StreamTokenizer; +import java.util.ArrayDeque; +import java.util.Deque; + +public class TokenStream { + private final StreamTokenizer tokenizer; + private final Deque buffer = new ArrayDeque<>(); + + TokenStream(Reader reader) { + this.tokenizer = Tokenizer.configureTokenizer(reader); + } + + Token peek() throws IOException { + if (buffer.isEmpty()) { + buffer.push(nextToken()); + } + return buffer.peek(); + } + + Token consume() throws IOException { + return buffer.isEmpty() ? nextToken() : buffer.pop(); + } + + boolean isEOF() throws IOException { + Token t = peek(); + return t.type() == StreamTokenizer.TT_EOF; + } + + int line() throws IOException { + return peek().line(); + } + + private Token nextToken() throws IOException { + int type = tokenizer.nextToken(); + String text = switch (type) { + case StreamTokenizer.TT_WORD, '"', '\'' -> tokenizer.sval; + case StreamTokenizer.TT_EOF -> ""; + default -> Character.toString((char) type); + }; + return new Token(type, text, tokenizer.lineno()); + } +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Tokenizer.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Tokenizer.java new file mode 100644 index 0000000000000..3ac771ef5f29e --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/Tokenizer.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm.policy; + +import java.io.Reader; +import java.io.StreamTokenizer; + +public final class Tokenizer { + + private Tokenizer() {} + + /* + * Configure the stream tokenizer: + * Recognize strings between "..." 
+ * Don't convert words to lowercase + * Recognize both C-style and C++-style comments + * Treat end-of-line as white space, not as a token + */ + + // new Token(StreamTokenizer.TT_WORD, "grant", line) // keyword + // new Token(StreamTokenizer.TT_WORD, "Codebase", line) + // new Token('"', "file:/some/path", line) // quoted string + // new Token('{', "{", line) // symbol + // new Token(StreamTokenizer.TT_WORD, "permission", line) + // new Token(StreamTokenizer.TT_WORD, "java.io.FilePermission", line) + // new Token('"', "file", line) + // new Token(',', ",", line) + // new Token('"', "read", line) + // new Token(';', ";", line) + // new Token('}', "}", line) + // new Token(';', ";", line) + public static StreamTokenizer configureTokenizer(Reader reader) { + StreamTokenizer st = new StreamTokenizer(reader); + + st.resetSyntax(); + st.wordChars('a', 'z'); + st.wordChars('A', 'Z'); + st.wordChars('.', '.'); + st.wordChars('0', '9'); + st.wordChars('_', '_'); + st.wordChars('$', '$'); + st.wordChars(128 + 32, 255); // extended chars + st.whitespaceChars(0, ' '); + st.commentChar('/'); + st.quoteChar('\''); + st.quoteChar('"'); + st.lowerCaseMode(false); + st.ordinaryChar('/'); + st.slashSlashComments(true); + st.slashStarComments(true); + + return st; + } +} diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java new file mode 100644 index 0000000000000..d182490b8d173 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Java Agent Policy + */ +package org.opensearch.secure_sm.policy; diff --git a/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java b/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java new file mode 100644 index 0000000000000..035719cbd7989 --- /dev/null +++ b/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.secure_sm.policy; + +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.io.Reader; +import java.io.StringReader; +import java.util.List; + +public class PolicyParserTests extends OpenSearchTestCase { + private static final String POLICY = """ + grant codeBase "TestCodeBase" { + permission java.net.NetPermission "accessUnixDomainSocket"; + }; + + grant { + permission java.net.NetPermission "accessUnixDomainSocket"; + permission java.net.SocketPermission "*", "accept,connect"; + }; + """; + + public void testPolicy() throws IOException, PolicyParser.ParsingException { + try (Reader reader = new StringReader(POLICY)) { + final List grantEntries = PolicyParser.read(reader); + assertEquals(2, grantEntries.size()); + + final GrantEntry grantEntry1 = grantEntries.get(0); + final GrantEntry grantEntry2 = grantEntries.get(1); + + assertEquals("TestCodeBase", grantEntry1.codeBase()); + + List permissions1 = grantEntry1.permissionEntries(); + assertEquals(1, permissions1.size()); + + PermissionEntry firstPerm1 = permissions1.get(0); + assertEquals("java.net.NetPermission", firstPerm1.permission()); + assertEquals("accessUnixDomainSocket", firstPerm1.name()); + + assertNull(grantEntry2.codeBase()); + + List permissions2 = grantEntry2.permissionEntries(); + assertEquals(2, permissions2.size()); + + PermissionEntry firstPerm2 = permissions2.get(0); + assertEquals("java.net.NetPermission", firstPerm2.permission()); + assertEquals("accessUnixDomainSocket", firstPerm2.name()); + + PermissionEntry secondPerm2 = permissions2.get(1); + assertEquals("java.net.SocketPermission", secondPerm2.permission()); + assertEquals("*", secondPerm2.name()); + assertEquals("accept,connect", secondPerm2.action()); + } + } +} From c09f79e83beafaa0344cda74578bb1654d620bf3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 20:30:59 -0400 Subject: [PATCH 152/550] Bump ch.qos.logback:logback-core from 1.5.17 to 1.5.18 in /test/fixtures/hdfs-fixture (#17809) * Bump ch.qos.logback:logback-core in /test/fixtures/hdfs-fixture Bumps [ch.qos.logback:logback-core](https://github.com/qos-ch/logback) from 1.5.17 to 1.5.18. - [Release notes](https://github.com/qos-ch/logback/releases) - [Commits](https://github.com/qos-ch/logback/compare/v_1.5.17...v_1.5.18) --- updated-dependencies: - dependency-name: ch.qos.logback:logback-core dependency-version: 1.5.18 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 2 +- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f2adbf9d3e4f..303cc44f91daf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,7 +34,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607), [#17669](https://github.com/opensearch-project/OpenSearch/pull/17669)) - Bump `com.google.api:api-common` from 1.8.1 to 2.46.1 ([#17604](https://github.com/opensearch-project/OpenSearch/pull/17604)) -- Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.17 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609)) +- Bump `ch.qos.logback:logback-core` from 1.5.16 to 1.5.18 ([#17609](https://github.com/opensearch-project/OpenSearch/pull/17609), [#17809](https://github.com/opensearch-project/OpenSearch/pull/17809)) - Bump `org.jruby.joni:joni` from 2.2.3 to 2.2.6 ([#17608](https://github.com/opensearch-project/OpenSearch/pull/17608), [#17732](https://github.com/opensearch-project/OpenSearch/pull/17732)) - Bump `dangoslen/dependabot-changelog-helper` from 3 to 4 ([#17498](https://github.com/opensearch-project/OpenSearch/pull/17498)) - Bump `com.google.api:gax` from 2.35.0 to 2.63.1 ([#17465](https://github.com/opensearch-project/OpenSearch/pull/17465)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index b3949062598be..41e90111c3488 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -74,7 +74,7 @@ dependencies { api 'org.apache.zookeeper:zookeeper:3.9.3' api "org.apache.commons:commons-text:1.13.0" api "commons-net:commons-net:3.11.1" - api "ch.qos.logback:logback-core:1.5.17" + api "ch.qos.logback:logback-core:1.5.18" api "ch.qos.logback:logback-classic:1.5.18" api "org.jboss.xnio:xnio-nio:3.8.16.Final" api 'org.jline:jline:3.29.0' From c91cdcb598478ba7f01bcf6ace01f29e7cd36333 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 21:01:09 -0400 Subject: [PATCH 153/550] Bump com.azure:azure-core from 1.54.1 to 1.55.3 in /plugins/repository-azure (#17810) * Bump com.azure:azure-core in /plugins/repository-azure Bumps [com.azure:azure-core](https://github.com/Azure/azure-sdk-for-java) from 1.54.1 to 1.55.3. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core_1.54.1...azure-core_1.55.3) --- updated-dependencies: - dependency-name: com.azure:azure-core dependency-version: 1.55.3 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 | 1 - plugins/repository-azure/licenses/azure-core-1.55.3.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-1.55.3.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 303cc44f91daf..57feac4b45bd0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `ch.qos.logback:logback-classic` from 1.5.17 to 1.5.18 ([#17730](https://github.com/opensearch-project/OpenSearch/pull/17730)) - Bump `reactor_netty` from 1.1.26 to 1.2.3 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322), [#17377](https://github.com/opensearch-project/OpenSearch/pull/17377)) - Bump `com.google.api.grpc:proto-google-iam-v1` from 1.33.0 to 1.49.1 ([#17811](https://github.com/opensearch-project/OpenSearch/pull/17811)) +- Bump `com.azure:azure-core` from 1.54.1 to 1.55.3 ([#17810](https://github.com/opensearch-project/OpenSearch/pull/17810)) ### Changed diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 0bf07695745d3..b20fe0c39bf31 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,7 +44,7 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.54.1' + api 'com.azure:azure-core:1.55.3' api 'com.azure:azure-json:1.3.0' api 'com.azure:azure-xml:1.1.0' api 'com.azure:azure-storage-common:12.28.0' diff --git a/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 deleted file mode 100644 index 9246d0dd8443a..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ae0cc4a8ff02a0146510ec9e1c06ab48950a66b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.55.3.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.55.3.jar.sha1 new file mode 100644 index 0000000000000..966919b5c3c86 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.55.3.jar.sha1 @@ -0,0 +1 @@ +120adc6c3de019097b163390a7eb511f0acd050b \ No newline at end of file From 0819161ef2d7c8b85806c61bfaabec9150641c70 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Mon, 7 Apr 2025 22:04:12 -0400 Subject: [PATCH 154/550] Remove dependency on :test:framework in :libs:opensearch-agent-sm:agent-policy (#17821) * Remove dependency on :test:framework in :libs:opensearch-agent-sm:agent-policy Signed-off-by: Craig Perkins * Use JUnit Signed-off-by: Craig Perkins * Disable more tasks Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- gradle/libs.versions.toml | 2 ++ libs/agent-sm/agent-policy/build.gradle | 5 +++-- .../opensearch/secure_sm/policy/PolicyParserTests.java | 8 ++++++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 53b616e2329d6..51afc304263bf 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -93,11 +93,13 @@ arrow = 
"18.1.0" flatbuffers = "2.0.0" [libraries] +hamcrest = { group = "org.hamcrest", name = "hamcrest", version.ref = "hamcrest" } hdrhistogram = { group = "org.hdrhistogram", name = "HdrHistogram", version.ref = "hdrhistogram" } jakartaannotation = { group = "jakarta.annotation", name = "jakarta.annotation-api", version.ref = "jakarta_annotation" } jodatime = { group = "joda-time", name = "joda-time", version.ref = "joda" } jna = { group = "net.java.dev.jna", name = "jna", version.ref = "jna" } jtscore = { group = "org.locationtech.jts", name = "jts-core", version.ref = "jts" } +junit = { group = "junit", name = "junit", version.ref = "junit" } jzlib = { group = "com.jcraft", name = "jzlib", version.ref = "jzlib" } log4japi = { group = "org.apache.logging.log4j", name = "log4j-api", version.ref = "log4j" } log4jjul = { group = "org.apache.logging.log4j", name = "log4j-jul", version.ref = "log4j" } diff --git a/libs/agent-sm/agent-policy/build.gradle b/libs/agent-sm/agent-policy/build.gradle index a44c2c1349909..d84342e2a4888 100644 --- a/libs/agent-sm/agent-policy/build.gradle +++ b/libs/agent-sm/agent-policy/build.gradle @@ -20,8 +20,9 @@ base { archivesName = 'opensearch-agent-policy' } -disableTasks('forbiddenApisMain') +disableTasks('forbiddenApisMain', 'forbiddenApisTest', 'testingConventions') dependencies { - testImplementation(project(":test:framework")) + testImplementation libs.junit + testImplementation libs.hamcrest } diff --git a/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java b/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java index 035719cbd7989..c6cfd8747904a 100644 --- a/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java +++ b/libs/agent-sm/agent-policy/src/test/java/org/opensearch/secure_sm/policy/PolicyParserTests.java @@ -7,14 +7,17 @@ */ package org.opensearch.secure_sm.policy; -import org.opensearch.test.OpenSearchTestCase; +import org.junit.Test; import java.io.IOException; import java.io.Reader; import java.io.StringReader; import java.util.List; -public class PolicyParserTests extends OpenSearchTestCase { +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +public class PolicyParserTests { private static final String POLICY = """ grant codeBase "TestCodeBase" { permission java.net.NetPermission "accessUnixDomainSocket"; @@ -26,6 +29,7 @@ public class PolicyParserTests extends OpenSearchTestCase { }; """; + @Test public void testPolicy() throws IOException, PolicyParser.ParsingException { try (Reader reader = new StringReader(POLICY)) { final List grantEntries = PolicyParser.read(reader); From 4560206c46d7d8e7f29b154859f0e175f10b9848 Mon Sep 17 00:00:00 2001 From: Sandesh Kumar Date: Mon, 7 Apr 2025 20:23:30 -0700 Subject: [PATCH 155/550] [Star Tree] [Search] Resolving Range aggregations with Star-tree (#17273) * range aggs changes --------- Signed-off-by: Sandesh Kumar --- CHANGELOG.md | 1 + .../bucket/range/RangeAggregator.java | 215 +++++++++++++---- .../search/startree/StarTreeQueryContext.java | 33 +++ .../search/SearchServiceStarTreeTests.java | 142 +++++++++++ .../startree/NumericTermsAggregatorTests.java | 3 + .../startree/RangeAggregatorTests.java | 226 ++++++++++++++++++ 6 files changed, 580 insertions(+), 40 deletions(-) create mode 100644 server/src/test/java/org/opensearch/search/aggregations/startree/RangeAggregatorTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 
57feac4b45bd0..88af9996fb2a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Implement fixed interval refresh task scheduling ([#17777](https://github.com/opensearch-project/OpenSearch/pull/17777)) - Add GRPC DocumentService and Bulk endpoint ([#17727](https://github.com/opensearch-project/OpenSearch/pull/17727)) - Added scale to zero (`search_only` mode) support for OpenSearch reader writer separation ([#17299](https://github.com/opensearch-project/OpenSearch/pull/17299) +- [Star Tree] [Search] Resolving numeric range aggregation with metric aggregation using star-tree ([#17273](https://github.com/opensearch-project/OpenSearch/pull/17273)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java index c7303011b5800..28f47298935df 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java @@ -32,7 +32,9 @@ package org.opensearch.search.aggregations.bucket.range; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.util.FixedBitSet; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,7 +45,13 @@ import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; import org.opensearch.index.fielddata.SortedNumericDoubleValues; +import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -53,12 +61,17 @@ import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; import org.opensearch.search.aggregations.NonCollectingAggregator; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.StarTreePreComputeCollector; import org.opensearch.search.aggregations.bucket.BucketsAggregator; import org.opensearch.search.aggregations.bucket.filterrewrite.FilterRewriteOptimizationContext; import org.opensearch.search.aggregations.bucket.filterrewrite.RangeAggregatorBridge; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; +import org.opensearch.search.startree.StarTreeTraversalUtil; +import 
org.opensearch.search.startree.filter.DimensionFilter; import java.io.IOException; import java.util.ArrayList; @@ -70,16 +83,18 @@ import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.opensearch.search.aggregations.bucket.filterrewrite.AggregatorBridge.segmentMatchAll; +import static org.opensearch.search.startree.StarTreeQueryHelper.getSupportedStarTree; /** * Aggregate all docs that match given ranges. * * @opensearch.internal */ -public class RangeAggregator extends BucketsAggregator { +public class RangeAggregator extends BucketsAggregator implements StarTreePreComputeCollector { public static final ParseField RANGES_FIELD = new ParseField("ranges"); public static final ParseField KEYED_FIELD = new ParseField("keyed"); + public final String fieldName; /** * Range for the range aggregator @@ -298,6 +313,9 @@ protected Function bucketOrdProducer() { } }; filterRewriteOptimizationContext = new FilterRewriteOptimizationContext(bridge, parent, subAggregators.length, context); + this.fieldName = (valuesSource instanceof ValuesSource.Numeric.FieldData) + ? ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName() + : null; } @Override @@ -310,8 +328,13 @@ public ScoreMode scoreMode() { @Override protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { - if (segmentMatchAll(context, ctx)) { - return filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, false); + if (segmentMatchAll(context, ctx) && filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, false)) { + return true; + } + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); + if (supportedStarTree != null) { + preComputeWithStarTree(ctx, supportedStarTree); + return true; } return false; } @@ -333,52 +356,107 @@ public void collect(int doc, long bucket) throws IOException { } private int collect(int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException { - int lo = lowBound, hi = ranges.length - 1; // all candidates are between these indexes - int mid = (lo + hi) >>> 1; - while (lo <= hi) { - if (value < ranges[mid].from) { - hi = mid - 1; - } else if (value >= maxTo[mid]) { - lo = mid + 1; - } else { - break; + MatchedRange range = new MatchedRange(ranges, lowBound, value, maxTo); + for (int i = range.startLo; i <= range.endHi; ++i) { + if (ranges[i].matches(value)) { + collectBucket(sub, doc, subBucketOrdinal(owningBucketOrdinal, i)); } - mid = (lo + hi) >>> 1; } - if (lo > hi) return lo; // no potential candidate - - // binary search the lower bound - int startLo = lo, startHi = mid; - while (startLo <= startHi) { - final int startMid = (startLo + startHi) >>> 1; - if (value >= maxTo[startMid]) { - startLo = startMid + 1; - } else { - startHi = startMid - 1; - } + return range.endHi + 1; + } + }; + } + + private void preComputeWithStarTree(LeafReaderContext ctx, CompositeIndexFieldInfo starTree) throws IOException { + StarTreeBucketCollector starTreeBucketCollector = getStarTreeBucketCollector(ctx, starTree, null); + FixedBitSet matchingDocsBitSet = starTreeBucketCollector.getMatchingDocsBitSet(); + + int numBits = matchingDocsBitSet.length(); + + if (numBits > 0) { + for (int bit = matchingDocsBitSet.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits) + ? 
matchingDocsBitSet.nextSetBit(bit + 1) + : DocIdSetIterator.NO_MORE_DOCS) { + starTreeBucketCollector.collectStarTreeEntry(bit, 0); + } + } + } + + @Override + public StarTreeBucketCollector getStarTreeBucketCollector( + LeafReaderContext ctx, + CompositeIndexFieldInfo starTree, + StarTreeBucketCollector parentCollector + ) throws IOException { + assert parentCollector == null; + StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); + // TODO: Evaluate optimizing StarTree traversal filter with specific ranges instead of MATCH_ALL_DEFAULT + return new StarTreeBucketCollector( + starTreeValues, + StarTreeTraversalUtil.getStarTreeResult( + starTreeValues, + StarTreeQueryHelper.mergeDimensionFilterIfNotExists( + context.getQueryShardContext().getStarTreeQueryContext().getBaseQueryStarTreeFilter(), + fieldName, + List.of(DimensionFilter.MATCH_ALL_DEFAULT) + ), + context + ) + ) { + @Override + public void setSubCollectors() throws IOException { + for (Aggregator aggregator : subAggregators) { + this.subCollectors.add(((StarTreePreComputeCollector) aggregator).getStarTreeBucketCollector(ctx, starTree, this)); + } + } + + SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getDimensionValuesIterator(fieldName); + + String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + starTree.getField(), + "_doc_count", + MetricStat.DOC_COUNT.getTypeName() + ); + + SortedNumericStarTreeValuesIterator docCountsIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getMetricValuesIterator(metricName); + + @Override + public void collectStarTreeEntry(int starTreeEntry, long owningBucketOrd) throws IOException { + if (!valuesIterator.advanceExact(starTreeEntry)) { + return; } - // binary search the upper bound - int endLo = mid, endHi = hi; - while (endLo <= endHi) { - final int endMid = (endLo + endHi) >>> 1; - if (value < ranges[endMid].from) { - endHi = endMid - 1; + for (int i = 0, count = valuesIterator.entryValueCount(); i < count; i++) { + long dimensionLongValue = valuesIterator.nextValue(); + double dimensionValue; + + // Only numeric & floating points are supported as of now in star-tree + // TODO: Add support for isBigInteger() when it gets supported in star-tree + if (valuesSource.isFloatingPoint()) { + dimensionValue = ((NumberFieldMapper.NumberFieldType) context.mapperService().fieldType(fieldName)).toDoubleValue( + dimensionLongValue + ); } else { - endLo = endMid + 1; + dimensionValue = dimensionLongValue; } - } - assert startLo == lowBound || value >= maxTo[startLo - 1]; - assert endHi == ranges.length - 1 || value < ranges[endHi + 1].from; + MatchedRange matchedRange = new MatchedRange(ranges, 0, dimensionValue, maxTo); + if (matchedRange.startLo > matchedRange.endHi) { + continue; // No matching range + } - for (int i = startLo; i <= endHi; ++i) { - if (ranges[i].matches(value)) { - collectBucket(sub, doc, subBucketOrdinal(owningBucketOrdinal, i)); + if (docCountsIterator.advanceExact(starTreeEntry)) { + long metricValue = docCountsIterator.nextValue(); + for (int j = matchedRange.startLo; j <= matchedRange.endHi; ++j) { + if (ranges[j].matches(dimensionValue)) { + long bucketOrd = subBucketOrdinal(owningBucketOrd, j); + collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); + } + } } } - - return endHi + 1; } }; } @@ -421,6 +499,63 @@ public InternalAggregation buildEmptyAggregation() { return rangeFactory.create(name, buckets, format, keyed, 
metadata()); } + static class MatchedRange { + int startLo, endHi; + + MatchedRange(RangeAggregator.Range[] ranges, int lowBound, double value, double[] maxTo) { + computeMatchingRange(ranges, lowBound, value, maxTo); + } + + private void computeMatchingRange(RangeAggregator.Range[] ranges, int lowBound, double value, double[] maxTo) { + int lo = lowBound, hi = ranges.length - 1; + int mid = (lo + hi) >>> 1; + + while (lo <= hi) { + if (value < ranges[mid].from) { + hi = mid - 1; + } else if (value >= maxTo[mid]) { + lo = mid + 1; + } else { + break; + } + mid = (lo + hi) >>> 1; + } + if (lo > hi) { + this.startLo = lo; + this.endHi = lo - 1; + return; + } + + // binary search the lower bound + int startLo = lo, startHi = mid; + while (startLo <= startHi) { + int startMid = (startLo + startHi) >>> 1; + if (value >= maxTo[startMid]) { + startLo = startMid + 1; + } else { + startHi = startMid - 1; + } + } + + // binary search the upper bound + int endLo = mid, endHi = hi; + while (endLo <= endHi) { + int endMid = (endLo + endHi) >>> 1; + if (value < ranges[endMid].from) { + endHi = endMid - 1; + } else { + endLo = endMid + 1; + } + } + + assert startLo == lowBound || value >= maxTo[startLo - 1]; + assert endHi == ranges.length - 1 || value < ranges[endHi + 1].from; + + this.startLo = startLo; + this.endHi = endHi; + } + } + /** * Unmapped range * @@ -456,7 +591,7 @@ public Unmapped( public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = new ArrayList<>(ranges.length); - for (RangeAggregator.Range range : ranges) { + for (Range range : ranges) { buckets.add(factory.createBucket(range.key, range.from, range.to, 0, subAggs, keyed, format)); } return factory.create(name, buckets, format, keyed, metadata()); diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java index a8f54f5793551..53a5a7e007417 100644 --- a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java @@ -15,12 +15,14 @@ import org.opensearch.index.compositeindex.datacube.Dimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.NumericDimension; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitRounding; import org.opensearch.index.mapper.CompositeDataCubeFieldType; import org.opensearch.index.query.QueryBuilder; import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; +import org.opensearch.search.aggregations.bucket.range.RangeAggregatorFactory; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory; import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory; import org.opensearch.search.internal.SearchContext; @@ -120,6 +122,10 @@ public boolean consolidateAllFilters(SearchContext context) { continue; } + // validation for range aggregation + if (validateRangeAggregationSupport(compositeMappedFieldType, aggregatorFactory)) { + continue; + } // invalid query shape return false; } @@ -184,6 +190,33 @@ private static boolean 
validateKeywordTermsAggregationSupport( return true; } + private static boolean validateRangeAggregationSupport( + CompositeDataCubeFieldType compositeIndexFieldInfo, + AggregatorFactory aggregatorFactory + ) { + if (!(aggregatorFactory instanceof RangeAggregatorFactory rangeAggregatorFactory)) { + return false; + } + + // Validate request field is part of dimensions & is a numeric field + // TODO: Add support for date type ranges + if (compositeIndexFieldInfo.getDimensions() + .stream() + .noneMatch( + dimension -> rangeAggregatorFactory.getField().equals(dimension.getField()) && dimension instanceof NumericDimension + )) { + return false; + } + + // Validate all sub-factories + for (AggregatorFactory subFactory : aggregatorFactory.getSubFactories().getFactories()) { + if (!validateStarTreeMetricSupport(compositeIndexFieldInfo, subFactory)) { + return false; + } + } + return true; + } + private StarTreeFilter getStarTreeFilter( SearchContext context, QueryBuilder queryBuilder, diff --git a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java index 95c877bfce0a8..9038f194843e3 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java @@ -47,6 +47,7 @@ import org.opensearch.search.aggregations.SearchContextAggregations; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.range.RangeAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; import org.opensearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; @@ -71,6 +72,7 @@ import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; import static org.opensearch.search.aggregations.AggregationBuilders.medianAbsoluteDeviation; +import static org.opensearch.search.aggregations.AggregationBuilders.range; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.hamcrest.CoreMatchers.notNullValue; @@ -689,6 +691,146 @@ public void testQueryParsingForBucketAggregations() throws IOException { setStarTreeIndexSetting(null); } + /** + * Test query parsing for range aggregations, with/without numeric term query + */ + public void testQueryParsingForRangeAggregations() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + setStarTreeIndexSetting("true"); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) + .build(); + CreateIndexRequestBuilder builder = client().admin() + .indices() + .prepareCreate("test") + .setSettings(settings) + .setMapping(NumericTermsAggregatorTests.getExpandedMapping(1, false)); + createIndex("test", builder); + + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService 
indexService = indicesService.indexServiceSafe(resolveIndex("test")); + IndexShard indexShard = indexService.getShard(0); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(true), + indexShard.shardId(), + 1, + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, + -1, + null, + null + ); + String KEYWORD_FIELD = "clientip"; + String NUMERIC_FIELD = "size"; + String NUMERIC_FIELD_NOT_ORDERED_DIMENSION = "rank"; + + MaxAggregationBuilder maxAggNoSub = max("max").field(FIELD_NAME); + SumAggregationBuilder sumAggSub = sum("sum").field(FIELD_NAME).subAggregation(maxAggNoSub); + MedianAbsoluteDeviationAggregationBuilder medianAgg = medianAbsoluteDeviation("median").field(FIELD_NAME); + + QueryBuilder baseQuery; + SearchContext searchContext = createSearchContext(indexService); + StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration( + 1, + Collections.emptySet(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ); + + // Case 1: MatchAllQuery and non-nested metric aggregations is nested within range aggregation, should use star tree + RangeAggregationBuilder rangeAggregationBuilder = range("range").field(NUMERIC_FIELD).addRange(0, 10).subAggregation(maxAggNoSub); + baseQuery = new MatchAllQueryBuilder(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(rangeAggregationBuilder); + + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); + + // Case 2: NumericTermsQuery and non-nested metric aggregations is nested within range aggregation, should use star tree + rangeAggregationBuilder = range("range").field(NUMERIC_FIELD).addRange(0, 100).subAggregation(maxAggNoSub); + baseQuery = new TermQueryBuilder(FIELD_NAME, 1); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(rangeAggregationBuilder); + + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD), new NumericDimension(FIELD_NAME)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); + + // Case 3: Nested metric aggregations within range aggregation, should not use star tree + rangeAggregationBuilder = range("range").field(NUMERIC_FIELD).addRange(0, 100).subAggregation(sumAggSub); + sourceBuilder = new SearchSourceBuilder().size(0).query(new TermQueryBuilder(FIELD_NAME, 1)).aggregation(rangeAggregationBuilder); + assertStarTreeContext(request, sourceBuilder, null, -1); + + // Case 4: Unsupported aggregations within range aggregation, should not use star tree + rangeAggregationBuilder = range("range").field(NUMERIC_FIELD).addRange(0, 100).subAggregation(medianAgg); + sourceBuilder = new SearchSourceBuilder().size(0).query(new TermQueryBuilder(FIELD_NAME, 1)).aggregation(rangeAggregationBuilder); + assertStarTreeContext(request, sourceBuilder, null, -1); + + // Case 5: Range Aggregation on field not in ordered dimensions, should not use star tree + rangeAggregationBuilder = 
range("range").field(NUMERIC_FIELD_NOT_ORDERED_DIMENSION).addRange(0, 100).subAggregation(medianAgg); + baseQuery = new MatchAllQueryBuilder(); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(rangeAggregationBuilder); + assertStarTreeContext(request, sourceBuilder, null, -1); + + // Case 6: Range Aggregation on non-numeric field, should not use star tree + rangeAggregationBuilder = range("range").field(TIMESTAMP_FIELD).addRange(0, 100).subAggregation(maxAggNoSub); + baseQuery = new MatchAllQueryBuilder(); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(rangeAggregationBuilder); + assertStarTreeContext(request, sourceBuilder, null, -1); + + // Case 7: Valid range aggregation and valid metric aggregation, should use star tree & cache + rangeAggregationBuilder = range("range").field(NUMERIC_FIELD).addRange(0, 100).subAggregation(maxAggNoSub); + baseQuery = new MatchAllQueryBuilder(); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(rangeAggregationBuilder).aggregation(maxAggNoSub); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + 1, + List.of(new NumericDimension(NUMERIC_FIELD), new OrdinalDimension(KEYWORD_FIELD)), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + 0 + ); + + setStarTreeIndexSetting(null); + } + private void setStarTreeIndexSetting(String value) { client().admin() .cluster() diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java index d3cb2d17e7c16..3c663ada97e6d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java @@ -323,6 +323,9 @@ public static XContentBuilder getExpandedMapping(int maxLeafDocs, boolean skipSt b.startObject("size"); b.field("type", "float"); b.endObject(); + b.startObject("rank"); + b.field("type", "integer"); + b.endObject(); b.startObject("geoip"); b.startObject("properties"); b.startObject("country_name"); diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/RangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/RangeAggregatorTests.java new file mode 100644 index 0000000000000..68d8423338f01 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/RangeAggregatorTests.java @@ -0,0 +1,226 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.startree; + +import com.carrotsearch.randomizedtesting.RandomizedTest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene101.Lucene101Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.composite101.Composite101Codec; +import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.bucket.range.InternalRange; +import org.opensearch.search.aggregations.bucket.range.RangeAggregationBuilder; +import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Random; + +import static org.opensearch.search.aggregations.AggregationBuilders.avg; +import static org.opensearch.search.aggregations.AggregationBuilders.count; +import static org.opensearch.search.aggregations.AggregationBuilders.max; +import static org.opensearch.search.aggregations.AggregationBuilders.min; +import static org.opensearch.search.aggregations.AggregationBuilders.range; +import static org.opensearch.search.aggregations.AggregationBuilders.sum; +import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; + +public class RangeAggregatorTests extends AggregatorTestCase { + final static String STATUS = "status"; + final static String SIZE = "size"; + private static final MappedFieldType STATUS_FIELD_TYPE = new NumberFieldMapper.NumberFieldType( + STATUS, + NumberFieldMapper.NumberType.LONG + ); + private static final MappedFieldType SIZE_FIELD_NAME = new NumberFieldMapper.NumberFieldType(SIZE, NumberFieldMapper.NumberType.FLOAT); + + @Before + public void setup() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + } + + @After + public void teardown() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } + + protected Codec 
getCodec() { + final Logger testLogger = LogManager.getLogger(NumericTermsAggregatorTests.class); + MapperService mapperService; + try { + mapperService = StarTreeDocValuesFormatTests.createMapperService(NumericTermsAggregatorTests.getExpandedMapping(1, false)); + } catch (IOException e) { + throw new RuntimeException(e); + } + return new Composite101Codec(Lucene101Codec.Mode.BEST_SPEED, mapperService, testLogger); + } + + public void testRangeAggregation() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setCodec(getCodec()); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + + Random random = RandomizedTest.getRandom(); + int totalDocs = 100; + List docs = new ArrayList<>(); + long val; + + // Index 100 random documents + for (int i = 0; i < totalDocs; i++) { + Document doc = new Document(); + if (random.nextBoolean()) { + val = random.nextInt(100); // Random int between 0 and 99 for status + doc.add(new SortedNumericDocValuesField(STATUS, val)); + } + if (random.nextBoolean()) { + val = NumericUtils.doubleToSortableLong(random.nextInt(100) + 0.5f); + doc.add(new SortedNumericDocValuesField(SIZE, val)); + } + iw.addDocument(doc); + docs.add(doc); + } + + if (randomBoolean()) { + iw.forceMerge(1); + } + iw.close(); + + DirectoryReader ir = DirectoryReader.open(directory); + LeafReaderContext context = ir.leaves().get(0); + + SegmentReader reader = Lucene.segmentReader(context.reader()); + IndexSearcher indexSearcher = newSearcher(reader, false, false); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + CompositeIndexFieldInfo starTree = compositeIndexFields.get(0); + + LinkedHashMap supportedDimensions = new LinkedHashMap<>(); + supportedDimensions.put(new NumericDimension(STATUS), STATUS_FIELD_TYPE); + supportedDimensions.put(new NumericDimension(SIZE), SIZE_FIELD_NAME); + + Query query = new MatchAllDocsQuery(); + QueryBuilder queryBuilder = null; + RangeAggregationBuilder rangeAggregationBuilder = range("range_agg").field(STATUS).addRange(10, 30).addRange(30, 50); + // no sub-aggregation + testCase(indexSearcher, query, queryBuilder, rangeAggregationBuilder, starTree, supportedDimensions); + + ValuesSourceAggregationBuilder[] aggBuilders = { + sum("_sum").field(SIZE), + max("_max").field(SIZE), + min("_min").field(SIZE), + count("_count").field(SIZE), + avg("_avg").field(SIZE) }; + + for (ValuesSourceAggregationBuilder aggregationBuilder : aggBuilders) { + query = new MatchAllDocsQuery(); + queryBuilder = null; + rangeAggregationBuilder = range("range_agg").field(STATUS).addRange(10, 30).addRange(30, 50).subAggregation(aggregationBuilder); + // sub-aggregation, no top level query + testCase(indexSearcher, query, queryBuilder, rangeAggregationBuilder, starTree, supportedDimensions); + + // Numeric-terms query with range aggregation + for (int cases = 0; cases < 100; cases++) { + // term query of status field + String queryField = SIZE; + long queryValue = NumericUtils.floatToSortableInt(random.nextInt(50) + 0.5f); + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + testCase(indexSearcher, query, queryBuilder, rangeAggregationBuilder, starTree, supportedDimensions); + + // range query on same field as aggregation field + 
query = SortedNumericDocValuesField.newSlowRangeQuery(STATUS, 15, 35); + queryBuilder = new RangeQueryBuilder(STATUS).from(15).to(35); + testCase(indexSearcher, query, queryBuilder, rangeAggregationBuilder, starTree, supportedDimensions); + } + } + + reader.close(); + directory.close(); + } + + private void testCase( + IndexSearcher indexSearcher, + Query query, + QueryBuilder queryBuilder, + RangeAggregationBuilder rangeAggregationBuilder, + CompositeIndexFieldInfo starTree, + LinkedHashMap supportedDimensions + ) throws IOException { + InternalRange starTreeAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + rangeAggregationBuilder, + starTree, + supportedDimensions, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + true, + STATUS_FIELD_TYPE, + SIZE_FIELD_NAME + ); + + InternalRange defaultAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + rangeAggregationBuilder, + null, + null, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + false, + STATUS_FIELD_TYPE, + SIZE_FIELD_NAME + ); + + assertEquals(defaultAggregation.getBuckets().size(), starTreeAggregation.getBuckets().size()); + assertEquals(defaultAggregation.getBuckets(), starTreeAggregation.getBuckets()); + } +} From 9b4688f6c3f6cd56308ad4f60cb004890681b8fe Mon Sep 17 00:00:00 2001 From: "Ilmar S. Habibulin" <161701313+ngr-ilmarh@users.noreply.github.com> Date: Tue, 8 Apr 2025 20:26:48 +0300 Subject: [PATCH 156/550] Fix possible NPE in RestTasksAction.java (#17778) Signed-off-by: Ilmar S. Habibulin <161701313+ngr-ilmarh@users.noreply.github.com> --- .../rest/action/cat/RestTasksAction.java | 2 +- .../rest/action/cat/RestTasksActionTests.java | 22 ++++++++++++++++++- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java index b6e6cc8603dcb..787b90967c541 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java @@ -172,7 +172,7 @@ private void buildRow(Table table, boolean fullId, boolean detailed, DiscoveryNo // Node information. Note that the node may be null because it has left the cluster between when we got this response and now. table.addCell(fullId ? nodeId : Strings.substring(nodeId, 0, 4)); table.addCell(node == null ? "-" : node.getHostAddress()); - table.addCell(node.getAddress().address().getPort()); + table.addCell(node == null ? "-" : node.getAddress().address().getPort()); table.addCell(node == null ? "-" : node.getName()); table.addCell(node == null ? 
"-" : node.getVersion().toString()); table.addCell(taskInfo.getHeaders().getOrDefault(Task.X_OPAQUE_ID, "-")); diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestTasksActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestTasksActionTests.java index aaa5c5534d5c0..acbe662b86b98 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestTasksActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestTasksActionTests.java @@ -39,13 +39,19 @@ import org.opensearch.common.collect.MapBuilder; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.tasks.TaskInfo; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpNodeClient; import org.opensearch.test.rest.FakeRestChannel; import org.opensearch.test.rest.FakeRestRequest; +import java.util.List; +import java.util.Map; + import static java.util.Collections.emptyList; +import static org.opensearch.tasks.TaskInfoTests.randomResourceStats; import static org.hamcrest.Matchers.is; public class RestTasksActionTests extends OpenSearchTestCase { @@ -77,7 +83,21 @@ public void doE Request request, ActionListener listener ) { - listener.onResponse((Response) new ListTasksResponse(emptyList(), emptyList(), emptyList())); + final TaskInfo taskInfo = new TaskInfo( + new TaskId("test-node-id", randomLong()), + "test_type", + "test_action", + "test_description", + null, + randomLong(), + randomLongBetween(0, Long.MAX_VALUE), + false, + false, + TaskId.EMPTY_TASK_ID, + Map.of("foo", "bar"), + randomResourceStats(randomBoolean()) + ); + listener.onResponse((Response) new ListTasksResponse(List.of(taskInfo), emptyList(), emptyList())); } }; } From 6b976ea0633a297e4c9b9cf7bf7263e6e1b658ad Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 8 Apr 2025 13:28:40 -0400 Subject: [PATCH 157/550] Fix FileInterceptor to properly check read / write sides path separation (#17836) Signed-off-by: Andriy Redko Signed-off-by: Andrew Ross Co-authored-by: Andrew Ross --- .../opensearch/javaagent/FileInterceptor.java | 38 +++++++++++++++---- .../javaagent/FileInterceptorIntegTests.java | 5 +++ 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java index 823b4e4fe0726..fb3d66d4524cf 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java @@ -61,18 +61,27 @@ public static void intercept(@Advice.AllArguments Object[] args, @Advice.Origin final Collection callers = walker.walk(StackCallerProtectionDomainChainExtractor.INSTANCE); final String name = method.getName(); - boolean isMutating = name.equals("copy") || name.equals("move") || name.equals("write") || name.startsWith("create"); + boolean isMutating = name.equals("move") || name.equals("write") || name.startsWith("create"); final boolean isDelete = isMutating == false ? 
name.startsWith("delete") : false; - if (isMutating == false && isDelete == false && name.equals("newByteChannel") == true) { - if (args.length > 1 && args[1] instanceof OpenOption[] opts) { - for (final OpenOption opt : opts) { - if (opt != StandardOpenOption.READ) { - isMutating = true; - break; + String targetFilePath = null; + if (isMutating == false && isDelete == false) { + if (name.equals("newByteChannel") == true) { + if (args.length > 1 && args[1] instanceof OpenOption[] opts) { + for (final OpenOption opt : opts) { + if (opt != StandardOpenOption.READ) { + isMutating = true; + break; + } } - } + } + } else if (name.equals("copy") == true) { + if (args.length > 1 && args[1] instanceof String pathStr) { + targetFilePath = Paths.get(pathStr).toAbsolutePath().toString(); + } else if (args.length > 1 && args[1] instanceof Path path) { + targetFilePath = path.toAbsolutePath().toString(); + } } } @@ -85,6 +94,19 @@ public static void intercept(@Advice.AllArguments Object[] args, @Advice.Origin } } + // Handle Files.copy() separately to check read/write permissions properly + if (method.getName().equals("copy")) { + if (!policy.implies(domain, new FilePermission(filePath, "read"))) { + throw new SecurityException("Denied OPEN access to file: " + filePath + ", domain: " + domain); + } + + if (targetFilePath != null) { + if (!policy.implies(domain, new FilePermission(targetFilePath, "write"))) { + throw new SecurityException("Denied OPEN access to file: " + targetFilePath + ", domain: " + domain); + } + } + } + // File mutating operations if (isMutating && !policy.implies(domain, new FilePermission(filePath, "write"))) { throw new SecurityException("Denied WRITE access to file: " + filePath + ", domain: " + domain); diff --git a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/FileInterceptorIntegTests.java b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/FileInterceptorIntegTests.java index 6375e1d3c2b2d..537f1d1f38159 100644 --- a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/FileInterceptorIntegTests.java +++ b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/FileInterceptorIntegTests.java @@ -29,6 +29,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; @SuppressWarnings("removal") @@ -144,6 +145,10 @@ public void testCopy() throws Exception { // Test copy operation Files.copy(sourcePath, targetPath); + assertThrows( + SecurityException.class, + () -> Files.copy(sourcePath, tmpDir.getRoot().resolve("test-target-" + randomAlphaOfLength(8) + ".txt")) + ); // Verify copy assertTrue("Target file should exist", Files.exists(targetPath)); From e8b975ddbd5cc7f0b39e50c95493a6c655a40933 Mon Sep 17 00:00:00 2001 From: Sriram Ganesh Date: Tue, 8 Apr 2025 23:34:23 +0530 Subject: [PATCH 158/550] Fix remove skip tags for version 6 (#17750) --------- Signed-off-by: Michael Froh Signed-off-by: Sriram Ganesh Co-authored-by: Sriram Ganesh Co-authored-by: Michael Froh --- .../test/geo_shape/290_geotile_grid.yml | 3 - .../test/old_cluster/20_date_range.yml | 80 ------------------- .../test/cat.thread_pool/10_basic.yml | 3 - .../test/cluster.state/10_basic.yml | 4 - .../test/cluster.state/20_filtering.yml | 2 - .../10_basic.yml | 4 - .../rest-api-spec/test/create/10_with_id.yml | 4 +- .../test/create/15_without_id.yml | 4 +- .../test/create/35_external_version.yml | 4 +- .../rest-api-spec/test/create/40_routing.yml | 4 +- 
.../rest-api-spec/test/create/60_refresh.yml | 12 +-- .../rest-api-spec/test/delete/10_basic.yml | 4 - .../test/delete/11_shard_header.yml | 4 - .../rest-api-spec/test/delete/12_result.yml | 4 - .../rest-api-spec/test/delete/20_cas.yml | 4 - .../test/delete/25_external_version.yml | 4 - .../test/delete/26_external_gte_version.yml | 4 - .../rest-api-spec/test/delete/30_routing.yml | 4 - .../rest-api-spec/test/delete/50_refresh.yml | 12 --- .../rest-api-spec/test/delete/60_missing.yml | 8 -- .../rest-api-spec/test/exists/40_routing.yml | 3 - .../test/exists/60_realtime_refresh.yml | 3 - .../rest-api-spec/test/get/40_routing.yml | 4 - .../test/get/50_with_headers.yml | 3 +- .../test/get/60_realtime_refresh.yml | 4 - .../rest-api-spec/test/get/90_versions.yml | 4 - .../test/get_source/40_routing.yml | 5 -- .../test/get_source/60_realtime_refresh.yml | 4 - .../test/get_source/80_missing.yml | 10 --- .../test/get_source/85_source_missing.yml | 4 - .../rest-api-spec/test/index/10_with_id.yml | 4 - .../test/index/15_without_id.yml | 4 - .../rest-api-spec/test/index/20_optype.yml | 4 - .../test/index/35_external_version.yml | 4 - .../test/index/36_external_gte_version.yml | 4 - .../rest-api-spec/test/index/40_routing.yml | 3 - .../rest-api-spec/test/index/60_refresh.yml | 11 --- .../test/indices.shrink/20_source_mapping.yml | 3 - .../rest-api-spec/test/mget/40_routing.yml | 3 - .../test/mget/60_realtime_refresh.yml | 3 - .../test/search.aggregation/40_range.yml | 3 - .../test/search.inner_hits/10_basic.yml | 8 +- .../search/115_multiple_field_collapsing.yml | 4 +- .../test/termvectors/10_basic.yml | 3 - .../test/update/11_shard_header.yml | 4 - .../test/update/35_if_seq_no.yml | 4 - .../rest-api-spec/test/update/40_routing.yml | 4 - .../rest-api-spec/test/update/60_refresh.yml | 12 --- 48 files changed, 10 insertions(+), 291 deletions(-) diff --git a/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/290_geotile_grid.yml b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/290_geotile_grid.yml index dfd5b6c5f2583..f150564550bab 100644 --- a/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/290_geotile_grid.yml +++ b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/290_geotile_grid.yml @@ -1,7 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: "added in 7.0.0" - do: indices.create: index: test_1 diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml index 6427a45e19f58..ed4619e060d96 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml @@ -1,83 +1,3 @@ ---- -"Create index with joda style index that is incompatible with java.time. (6.0)": - - skip: - features: "allowed_warnings" - version: "6.8.1 -" - reason: change of warning message - - do: - allowed_warnings: - - "Use of 'Y' (year-of-era) will change to 'y' in the next major version of OpenSearch. Prefix your date format with '8' to use the new specifier." 
- indices.create: - index: joda_for_range - body: - settings: - index: - number_of_replicas: 2 - mappings: - "properties": - "time_frame": - "type": "date_range" - "format": "YYYY-MM-dd'T'HH:mmZZ" - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "joda_for_range"}}' - - '{"time_frame": {"gte": "2019-01-01T00:00+01:00", "lte" : "2019-03-01T00:00+01:00"}}' - - - do: - search: - rest_total_hits_as_int: true - index: joda_for_range - body: - query: - range: - time_frame: - gte: "2019-02-01T00:00+01:00" - lte: "2019-02-01T00:00+01:00" - - match: { hits.total: 1 } - ---- -"Create index with joda style index that is incompatible with java.time (>6.1)": - - skip: - features: "allowed_warnings" - version: " - 6.8.0, 7.0.0 -" - reason: change of warning message, we skip 7 becase this format will be considered java - - do: - allowed_warnings: - - "'Y' year-of-era should be replaced with 'y'. Use 'Y' for week-based-year.; 'Z' time zone offset/id fails when parsing 'Z' for Zulu timezone. Consider using 'X'. Prefix your date format with '8' to use the new specifier." - indices.create: - index: joda_for_range - body: - settings: - index: - number_of_replicas: 2 - mappings: - "properties": - "time_frame": - "type": "date_range" - "format": "YYYY-MM-dd'T'HH:mmZZ" - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "joda_for_range"}}' - - '{"time_frame": {"gte": "2019-01-01T00:00+01:00", "lte" : "2019-03-01T00:00+01:00"}}' - - - do: - search: - rest_total_hits_as_int: true - index: joda_for_range - body: - query: - range: - time_frame: - gte: "2019-02-01T00:00+01:00" - lte: "2019-02-01T00:00+01:00" - - match: { hits.total: 1 } - --- "Create index with java style index in 6": - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml index 39c8040993f2a..ad72592fa49b4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml @@ -54,9 +54,6 @@ --- "Test cat thread_pool output": - - skip: - version: " - 6.99.99" - reason: this API was changed in a backwards-incompatible fashion in 7.0.0 so we need to skip in a mixed cluster - do: cat.thread_pool: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml index 294f00bdd822b..3e56ddc587d8b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml @@ -7,10 +7,6 @@ --- "get cluster state returns cluster_uuid at the top level": - - skip: - version: " - 6.3.99" - reason: "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher" - - do: cluster.state: human: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml index b17201a911290..a2f03680cf182 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml @@ -156,8 +156,6 @@ setup: --- "Filtering the cluster state returns cluster_uuid at the top level regardless of metric filters": - skip: - version: " - 6.3.99" - reason: "cluster 
state including cluster_uuid at the top level is new in v6.4.0 and higher" features: allowed_warnings # Get the current cluster_uuid diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic.yml index 23eebacabf3f3..9390689ea53d2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic.yml @@ -4,10 +4,6 @@ teardown: --- "Get cluster state without voting config exclusions": - - skip: - version: " - 6.99.99" - reason: Voting config exclusions were introduced in 7.0.0 - - do: cluster.state: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/10_with_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/10_with_id.yml index 410b31acb7138..f69e3600a43d3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/10_with_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/10_with_id.yml @@ -1,8 +1,6 @@ --- "Create with ID": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 + - do: create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yml index 5280c5bb9946d..ddfb4775458c9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yml @@ -1,8 +1,6 @@ --- "Create without ID": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 + - do: catch: param create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml index 47dc5b6059609..86d0d4b59e06b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml @@ -1,8 +1,6 @@ --- "External version": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 + - do: catch: bad_request create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml index e1341ac2b5380..e2e8e4a53db1e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml @@ -1,8 +1,6 @@ --- "Routing": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 + - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml index dd8acd9f99f4f..e348a980de685 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml @@ -1,8 +1,6 @@ --- "Refresh": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 + - do: indices.create: index: test_1 @@ -44,9 +42,7 @@ --- "When refresh url parameter is an empty string that means \"refresh 
immediately\"": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 + - do: create: index: test_1 @@ -66,9 +62,7 @@ --- "refresh=wait_for waits until changes are visible in search": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 + - do: index: index: create_60_refresh_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/10_basic.yml index 842d749d7b14d..693624b5b4d35 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/10_basic.yml @@ -1,10 +1,6 @@ --- "Basic": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml index 6a2f852b221c2..2ff003c51ad61 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml @@ -1,10 +1,6 @@ --- "Delete check shard header": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: foobar diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/12_result.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/12_result.yml index 13356cd938c48..e18f56ceb74b7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/12_result.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/12_result.yml @@ -1,10 +1,6 @@ --- "Delete result field": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/20_cas.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/20_cas.yml index f3c7b0acbcccd..944c239f7ef1e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/20_cas.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/20_cas.yml @@ -1,10 +1,6 @@ --- "Internal version": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/25_external_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/25_external_version.yml index d7cc4fce0eda5..ba5bc891c24a5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/25_external_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/25_external_version.yml @@ -1,10 +1,6 @@ --- "External version": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/26_external_gte_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/26_external_gte_version.yml index ebe1680551c96..d03353c496ece 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/26_external_gte_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/26_external_gte_version.yml @@ -1,10 +1,6 @@ --- "External GTE version": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - 
do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml index c3c407cd9173a..a5cdd4a251e0c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml @@ -1,10 +1,6 @@ --- "Routing": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml index 935e0946f100b..3e54acd3c7dfb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml @@ -1,10 +1,6 @@ --- "Refresh": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 @@ -81,10 +77,6 @@ --- "When refresh url parameter is an empty string that means \"refresh immediately\"": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 @@ -118,10 +110,6 @@ --- "refresh=wait_for waits until changes are visible in search": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: delete_50_refresh_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/60_missing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/60_missing.yml index b8f81080f3ee8..cc53b81cb4540 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/60_missing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/60_missing.yml @@ -1,10 +1,6 @@ --- "Missing document with catch": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: catch: missing delete: @@ -14,10 +10,6 @@ --- "Missing document with ignore": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: delete: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/40_routing.yml index 8d59c8a0535f5..85dbeba837c25 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/40_routing.yml @@ -1,8 +1,5 @@ --- "Routing": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/60_realtime_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/60_realtime_refresh.yml index e12a504349c4d..6089a19bba4b2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/60_realtime_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/60_realtime_refresh.yml @@ -1,8 +1,5 @@ --- "Realtime Refresh": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml index 7f45b39add8a7..bc876c287362a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml 
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml @@ -1,10 +1,6 @@ --- "Routing": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml index d79a3bd300da8..8500f0a441abd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml @@ -2,8 +2,7 @@ "REST test with headers": - skip: features: ["headers", "yaml"] - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 + - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml index ef4fa60bf1b0e..0c942086010e8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml @@ -1,10 +1,6 @@ --- "Realtime Refresh": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml index 3f45a1da09dce..93585fb762c8f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml @@ -1,10 +1,6 @@ --- "Versions": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/40_routing.yml index 6425f70f26aad..191a0411604d1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/40_routing.yml @@ -1,11 +1,6 @@ --- "Routing": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/60_realtime_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/60_realtime_refresh.yml index d39b07a6ce5f7..16140cfe14ceb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/60_realtime_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/60_realtime_refresh.yml @@ -1,10 +1,6 @@ --- "Realtime": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/80_missing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/80_missing.yml index b704fc2612007..d2d2ad83bf290 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/80_missing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/80_missing.yml @@ -1,11 +1,6 @@ --- "Missing document with catch": - - skip: - features: warnings - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: catch: missing get_source: @@ 
-15,11 +10,6 @@ --- "Missing document with ignore": - - skip: - features: warnings - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: get_source: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/85_source_missing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/85_source_missing.yml index c214bf87d3997..8a8d23b6bb0ef 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/85_source_missing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/85_source_missing.yml @@ -1,10 +1,6 @@ --- setup: - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml index 97eb9be1547ba..e716a637ecd9d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml @@ -1,10 +1,6 @@ --- "Index with ID": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test-weird-index-中文 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml index 54f203e3621bc..80e80bf5a4845 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml @@ -1,10 +1,6 @@ --- "Index without ID": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/20_optype.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/20_optype.yml index c33a86093acab..6c1380dc1795a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/20_optype.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/20_optype.yml @@ -1,10 +1,6 @@ --- "Optype": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/35_external_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/35_external_version.yml index 89aaa190af384..d23be00aa08fc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/35_external_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/35_external_version.yml @@ -1,10 +1,6 @@ --- "External version": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/36_external_gte_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/36_external_gte_version.yml index 82421227adb7f..c72bcf66f9879 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/36_external_gte_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/36_external_gte_version.yml @@ -1,10 +1,6 @@ --- "External GTE version": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml index f6f497269b043..fd4faf51464a6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml @@ -1,9 +1,6 @@ --- "Routing": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml index e16602d7ac8b6..7faf564bd95ba 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml @@ -1,9 +1,6 @@ --- "Refresh": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: @@ -48,10 +45,6 @@ --- "When refresh url parameter is an empty string that means \"refresh immediately\"": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 @@ -72,10 +65,6 @@ --- "refresh=wait_for waits until changes are visible in search": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: index_60_refresh_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index 8d08373208216..ee631b8a6a68a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -1,10 +1,7 @@ --- "Shrink index ignores target template mapping": - skip: - version: " - 6.9.99" - reason: expects warnings that pre-7.0.0 will not send features: [warnings, arbitrary_key, allowed_warnings] - - do: nodes.info: node_id: data:true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yml index 50bf9a158852b..db68eb867aa67 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yml @@ -1,8 +1,5 @@ --- "Routing": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/60_realtime_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/60_realtime_refresh.yml index 3b1bfcdca556c..2146e38377a8c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/60_realtime_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/60_realtime_refresh.yml @@ -1,8 +1,5 @@ --- "Realtime Refresh": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml index 2cc49061d53e8..9e138eb6b2ad6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml @@ -213,9 +213,6 @@ setup: --- "IP Range Key Generation": - - 
skip: - version: " - 6.3.99" - reason: "Before 6.4.0, ip_range did not always generate bucket keys (see #21045)." - do: search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml index 7b3b4e8233d0b..bd2b3af0e0856 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml @@ -11,9 +11,7 @@ setup: --- "Nested inner hits": - - skip: - version: " - 6.1.99" - reason: "<= 6.1 nodes don't always include index or id in nested inner hits" + - do: index: index: test @@ -41,10 +39,6 @@ setup: --- "Nested doc version and seqIDs": - - skip: - version: " - 6.99.99" - reason: "Triggers warnings before 7.0" - - do: index: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/115_multiple_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/115_multiple_field_collapsing.yml index b10401f48dbce..0b04385112af5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/115_multiple_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/115_multiple_field_collapsing.yml @@ -1,8 +1,6 @@ --- "two levels fields collapsing": - - skip: - version: " - 6.99.99" - reason: using multiple field collapsing from 7.0 on + - do: indices.create: index: addresses diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/10_basic.yml index 62ec86118e5bb..ba4bf7a02ad4c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/10_basic.yml @@ -1,7 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml index ffcb72027b33d..6dc5a49c5d954 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml @@ -1,10 +1,6 @@ --- "Update check shard header": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: foobar diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml index c93be37be49f5..21e3afc8cc3be 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml @@ -1,10 +1,6 @@ --- "Update with if_seq_no": - - skip: - version: " - 6.6.99" - reason: if_seq_no was added in 6.7.0 - - do: catch: missing update: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml index 28e42f9dafea9..659e97d005154 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml @@ -1,10 +1,6 @@ --- "Routing": - - skip: - version: " - 6.99.99" - reason: types are 
required in requests before 7.0.0 - - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml index 3a74f75f4f11d..e55ff9138044e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml @@ -1,10 +1,6 @@ --- "Refresh": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 @@ -52,10 +48,6 @@ --- "When refresh url parameter is an empty string that means \"refresh immediately\"": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 @@ -85,10 +77,6 @@ --- "refresh=wait_for waits until changes are visible in search": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: update_60_refresh_1 From ca03fdd206ba9b2f82a2476b7f23ee3ddfb929a6 Mon Sep 17 00:00:00 2001 From: Vinay Krishna Pudyodu Date: Tue, 8 Apr 2025 11:12:45 -0700 Subject: [PATCH 159/550] Added Search Only strict routing setting (#17803) * Added Search Only strict routing setting Signed-off-by: Vinay Krishna Pudyodu * Added Changelog Signed-off-by: Vinay Krishna Pudyodu --------- Signed-off-by: Vinay Krishna Pudyodu --- CHANGELOG.md | 1 + .../indices/settings/SearchOnlyReplicaIT.java | 77 +++++++++++++++++++ .../cluster/routing/OperationRouting.java | 16 +++- .../common/settings/ClusterSettings.java | 1 + .../routing/OperationRoutingTests.java | 71 +++++++++++++++++ 5 files changed, 165 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 88af9996fb2a6..052322b202589 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add GRPC DocumentService and Bulk endpoint ([#17727](https://github.com/opensearch-project/OpenSearch/pull/17727)) - Added scale to zero (`search_only` mode) support for OpenSearch reader writer separation ([#17299](https://github.com/opensearch-project/OpenSearch/pull/17299) - [Star Tree] [Search] Resolving numeric range aggregation with metric aggregation using star-tree ([#17273](https://github.com/opensearch-project/OpenSearch/pull/17273)) +- Added Search Only strict routing setting ([#17803](https://github.com/opensearch-project/OpenSearch/pull/17803)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java index 6e3cd8e724214..eab3d229f576d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java @@ -8,6 +8,7 @@ package org.opensearch.indices.settings; +import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest; import org.opensearch.cluster.ClusterState; @@ -30,6 +31,8 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static 
org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.core.IsEqual.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SearchOnlyReplicaIT extends RemoteStoreBaseIntegTestCase { @@ -236,6 +239,80 @@ public void testSearchReplicaRoutingPreference() throws IOException { assertEquals(nodeId, indexShardRoutingTable.searchOnlyReplicas().get(0).currentNodeId()); } + public void testSearchReplicaRoutingPreferenceWhenSearchReplicaUnassigned() { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + createIndex(TEST_INDEX, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1).build()); + ensureYellow(TEST_INDEX); + client().prepareIndex(TEST_INDEX).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + // By default cluster.routing.search_only.strict is set as true + // When cluster.routing.search_only.strict is set as true, and no assigned search replica is available, + // search request will fail since it will route only to search replica but it's not available + Throwable throwable = assertThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch(TEST_INDEX).setPreference(null).setQuery(QueryBuilders.matchAllQuery()).get() + ); + + assertEquals("all shards failed", throwable.getMessage()); + + // Set cluster.routing.search_only.strict as false + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.routing.search_only.strict", false)) + .get(); + + // When cluster.routing.search_only.strict is set as false, and no assigned search replica is available; + // search request will fall back to querying writers + SearchResponse response = client().prepareSearch(TEST_INDEX).setPreference(null).setQuery(QueryBuilders.matchAllQuery()).get(); + + String nodeId = response.getHits().getAt(0).getShard().getNodeId(); + IndexShardRoutingTable indexShardRoutingTable = getIndexShardRoutingTable(); + assertEquals(nodeId, indexShardRoutingTable.primaryShard().currentNodeId()); + } + + public void testSearchReplicaRoutingPreferenceWhenSearchReplicaAssigned() { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + createIndex(TEST_INDEX, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1).build()); + ensureYellow(TEST_INDEX); + client().prepareIndex(TEST_INDEX).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + internalCluster().startSearchOnlyNode(); + ensureGreen(TEST_INDEX); + + // By default cluster.routing.search_only.strict is set as true + // When cluster.routing.search_only.strict is set as true, and assigned search replica is available; + // search request will succeed + SearchResponse response = client().prepareSearch(TEST_INDEX).setPreference(null).setQuery(QueryBuilders.matchAllQuery()).get(); + + String nodeId = response.getHits().getAt(0).getShard().getNodeId(); + IndexShardRoutingTable indexShardRoutingTable = getIndexShardRoutingTable(); + assertEquals(nodeId, indexShardRoutingTable.searchOnlyReplicas().get(0).currentNodeId()); + + // Set cluster.routing.search_only.strict as false + client().admin() + .cluster() + 
.prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.routing.search_only.strict", false)) + .get(); + + // When cluster.routing.search_only.strict is set as false, and assigned search replica is available; + // search request can land on either writer or reader + response = client().prepareSearch(TEST_INDEX).setPreference(null).setQuery(QueryBuilders.matchAllQuery()).get(); + + nodeId = response.getHits().getAt(0).getShard().getNodeId(); + indexShardRoutingTable = getIndexShardRoutingTable(); + assertThat( + nodeId, + anyOf( + equalTo(indexShardRoutingTable.primaryShard().currentNodeId()), + equalTo(indexShardRoutingTable.searchOnlyReplicas().get(0).currentNodeId()) + ) + ); + } + public void testUnableToAllocateSearchReplicaWontBlockRegularReplicaAllocation() { int numSearchReplicas = 1; int numWriterReplicas = 1; diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 9f23ba3a01539..bcf03ec0e75b7 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -118,6 +118,13 @@ public class OperationRouting { Preference.PREFER_NODES ); + public static final Setting STRICT_SEARCH_ONLY_ROUTING_ENABLED = Setting.boolSetting( + "cluster.routing.search_only.strict", + true, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private volatile List awarenessAttributes; private volatile boolean useAdaptiveReplicaSelection; private volatile boolean ignoreAwarenessAttr; @@ -125,6 +132,7 @@ public class OperationRouting { private volatile boolean isFailOpenEnabled; private volatile boolean isStrictWeightedShardRouting; private volatile boolean ignoreWeightedRouting; + private volatile boolean isStrictSearchOnlyShardRouting; private final boolean isReaderWriterSplitEnabled; public OperationRouting(Settings settings, ClusterSettings clusterSettings) { @@ -140,12 +148,14 @@ public OperationRouting(Settings settings, ClusterSettings clusterSettings) { this.isFailOpenEnabled = WEIGHTED_ROUTING_FAILOPEN_ENABLED.get(settings); this.isStrictWeightedShardRouting = STRICT_WEIGHTED_SHARD_ROUTING_ENABLED.get(settings); this.ignoreWeightedRouting = IGNORE_WEIGHTED_SHARD_ROUTING.get(settings); + this.isStrictSearchOnlyShardRouting = STRICT_SEARCH_ONLY_ROUTING_ENABLED.get(settings); clusterSettings.addSettingsUpdateConsumer(USE_ADAPTIVE_REPLICA_SELECTION_SETTING, this::setUseAdaptiveReplicaSelection); clusterSettings.addSettingsUpdateConsumer(IGNORE_AWARENESS_ATTRIBUTES_SETTING, this::setIgnoreAwarenessAttributes); clusterSettings.addSettingsUpdateConsumer(WEIGHTED_ROUTING_DEFAULT_WEIGHT, this::setWeightedRoutingDefaultWeight); clusterSettings.addSettingsUpdateConsumer(WEIGHTED_ROUTING_FAILOPEN_ENABLED, this::setFailOpenEnabled); clusterSettings.addSettingsUpdateConsumer(STRICT_WEIGHTED_SHARD_ROUTING_ENABLED, this::setStrictWeightedShardRouting); clusterSettings.addSettingsUpdateConsumer(IGNORE_WEIGHTED_SHARD_ROUTING, this::setIgnoreWeightedRouting); + clusterSettings.addSettingsUpdateConsumer(STRICT_SEARCH_ONLY_ROUTING_ENABLED, this::setStrictSearchOnlyShardRouting); this.isReaderWriterSplitEnabled = FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.get(settings); } @@ -193,6 +203,10 @@ public double getWeightedRoutingDefaultWeight() { return this.weightedRoutingDefaultWeight; } + void setStrictSearchOnlyShardRouting(boolean strictSearchOnlyShardRouting) 
{ + this.isStrictSearchOnlyShardRouting = strictSearchOnlyShardRouting; + } + public ShardIterator indexShards(ClusterState clusterState, String index, String id, @Nullable String routing) { return shards(clusterState, index, id, routing).shardsIt(); } @@ -265,7 +279,7 @@ public GroupShardsIterator searchShards( if (isReaderWriterSplitEnabled) { if (preference == null || preference.isEmpty()) { - if (indexMetadataForShard.getNumberOfSearchOnlyReplicas() > 0) { + if (indexMetadataForShard.getNumberOfSearchOnlyReplicas() > 0 && isStrictSearchOnlyShardRouting) { preference = Preference.SEARCH_REPLICA.type(); } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 84b50041b7c91..9d8b458d70966 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -613,6 +613,7 @@ public void apply(Settings value, Settings current, Settings previous) { OperationRouting.WEIGHTED_ROUTING_FAILOPEN_ENABLED, OperationRouting.STRICT_WEIGHTED_SHARD_ROUTING_ENABLED, OperationRouting.IGNORE_WEIGHTED_SHARD_ROUTING, + OperationRouting.STRICT_SEARCH_ONLY_ROUTING_ENABLED, IndexGraveyard.SETTING_MAX_TOMBSTONES, PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index 8cfdcce45c523..11890d561e8b7 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -1196,6 +1196,77 @@ public void testSearchReplicaDefaultRouting() throws Exception { } } + public void testSearchReplicaRoutingWhenSearchOnlyStrictSettingIsFalse() throws Exception { + final int numShards = 1; + final int numReplicas = 2; + final int numSearchReplicas = 2; + final String indexName = "test"; + final String[] indexNames = new String[] { indexName }; + + ClusterService clusterService = null; + ThreadPool threadPool = null; + + try { + OperationRouting opRouting = new OperationRouting( + Settings.builder().put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, "true").build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + opRouting.setStrictSearchOnlyShardRouting(false); + + ClusterState state = ClusterStateCreationUtils.stateWithAssignedPrimariesAndReplicas( + indexNames, + numShards, + numReplicas, + numSearchReplicas + ); + IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().index(indexName).getShards().get(0); + ShardId shardId = indexShardRoutingTable.searchOnlyReplicas().get(0).shardId(); + + threadPool = new TestThreadPool("testSearchReplicaDefaultRouting"); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + + // add a search replica in initializing state: + DiscoveryNode node = new DiscoveryNode( + "node_initializing", + OpenSearchTestCase.buildNewFakeTransportAddress(), + Collections.emptyMap(), + new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES), + Version.CURRENT + ); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(Settings.builder().put(state.metadata().index(indexName).getSettings()).build()) + .numberOfSearchReplicas(3) + 
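For context on this patch: `cluster.routing.search_only.strict` is registered as a dynamic cluster setting, so it can be toggled on a live cluster. Below is a minimal sketch of flipping it from a transport client, modelled on the integration test in this patch; the wrapper class, method name, and the Client import are illustrative assumptions, while the setting key and the settings-update calls come straight from the diff.

    import org.opensearch.common.settings.Settings;
    import org.opensearch.transport.client.Client;

    final class SearchOnlyRoutingToggle {
        // Relax strict search-only routing so that searches can fall back to
        // primary/replica shards when no search replica is assigned.
        static void relaxStrictRouting(Client client) {
            client.admin()
                .cluster()
                .prepareUpdateSettings()
                .setTransientSettings(Settings.builder().put("cluster.routing.search_only.strict", false))
                .get();
        }
    }

Setting the value back to true restores the default behavior of routing such searches only to search replicas when the index has any configured.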
.numberOfReplicas(2) + .build(); + Metadata.Builder metadataBuilder = Metadata.builder(state.metadata()).put(indexMetadata, false).generateClusterUuidIfNeeded(); + IndexRoutingTable.Builder indexShardRoutingBuilder = IndexRoutingTable.builder(indexMetadata.getIndex()); + indexShardRoutingBuilder.addIndexShard(indexShardRoutingTable); + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(shardId, node.getId(), null, false, true, ShardRoutingState.INITIALIZING, null) + ); + state = ClusterState.builder(state) + .routingTable(RoutingTable.builder().add(indexShardRoutingBuilder).build()) + .metadata(metadataBuilder.build()) + .build(); + + GroupShardsIterator groupIterator = opRouting.searchShards(state, indexNames, null, null); + assertThat("one group per shard", groupIterator.size(), equalTo(numShards)); + for (ShardIterator shardIterator : groupIterator) { + assertEquals("We should have all 6 shards returned", shardIterator.size(), 6); + for (ShardRouting shardRouting : shardIterator) { + assertTrue( + "Any shard can exist with when cluster.routing.search_only.strict is set as false", + shardRouting.isSearchOnly() || shardRouting.primary() || shardRouting.isSearchOnly() == false + ); + } + } + } finally { + IOUtils.close(clusterService); + terminate(threadPool); + } + } + private DiscoveryNode[] setupNodes() { // Sets up two data nodes in zone-a and one data node in zone-b List zones = Arrays.asList("a", "a", "b"); From 1b56084c09649d1a3422c62adee4002aa8890be9 Mon Sep 17 00:00:00 2001 From: Asim M Date: Tue, 8 Apr 2025 11:29:48 -0700 Subject: [PATCH 160/550] Switch from IOContext.DEFAULT(RANDOM) to READONLY for sequential cases (#17670) * Lucene 10 changed the IOContext.DEFAULT from sequential to random, which makes sense for search use case: https://github.com/apache/lucene/pull/13244 * place we read a file only once, its better to switch to READONLY(sequential) * this should only be in cases the file is read by the same thread that opened it, e.g. it won't work for RemoteStore that does async upload --------- Signed-off-by: Asim Mahmood --- .../org/opensearch/common/settings/KeyStoreWrapper.java | 2 +- .../java/org/opensearch/gateway/MetadataStateFormat.java | 6 +++--- .../main/java/org/opensearch/index/translog/Checkpoint.java | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java index 81fb1309df310..d5ec37516a678 100644 --- a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java @@ -527,7 +527,7 @@ public synchronized void save(Path configDir, char[] password) throws Exception NIOFSDirectory directory = new NIOFSDirectory(configDir); // write to tmp file first, then overwrite String tmpFile = KEYSTORE_FILENAME + ".tmp"; - try (IndexOutput output = EndiannessReverserUtil.createOutput(directory, tmpFile, IOContext.DEFAULT)) { + try (IndexOutput output = EndiannessReverserUtil.createOutput(directory, tmpFile, IOContext.READONCE)) { CodecUtil.writeHeader(output, KEYSTORE_FILENAME, FORMAT_VERSION); output.writeByte(password.length == 0 ? 
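In isolation, the read-side pattern this change targets looks like the sketch below: a small state file is opened, checksummed, and parsed once by the thread that opened it, so `IOContext.READONCE` is the better fit now that Lucene 10 treats `IOContext.DEFAULT` as random-access. The directory path, file name, and wrapper class are placeholders; the Lucene calls themselves mirror the `Checkpoint` and `MetadataStateFormat` hunks that follow.

    import java.io.IOException;
    import java.nio.file.Path;

    import org.apache.lucene.codecs.CodecUtil;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;
    import org.apache.lucene.store.NIOFSDirectory;

    final class ReadOnceExample {
        // Open a small metadata file, verify its checksum, then read it sequentially once.
        static void readOnce(Path dir, String fileName) throws IOException {
            try (Directory directory = new NIOFSDirectory(dir);
                 IndexInput in = directory.openInput(fileName, IOContext.READONCE)) {
                CodecUtil.checksumEntireFile(in);
                // ... parse the file contents here ...
            }
        }
    }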
(byte) 0 : (byte) 1); diff --git a/server/src/main/java/org/opensearch/gateway/MetadataStateFormat.java b/server/src/main/java/org/opensearch/gateway/MetadataStateFormat.java index ad47ca66129bb..cb55c35a9b7e6 100644 --- a/server/src/main/java/org/opensearch/gateway/MetadataStateFormat.java +++ b/server/src/main/java/org/opensearch/gateway/MetadataStateFormat.java @@ -120,7 +120,7 @@ private void writeStateToFirstLocation(final T state, Path stateLocation, Direct throws WriteStateException { try { deleteFileIfExists(stateLocation, stateDir, tmpFileName); - try (IndexOutput out = EndiannessReverserUtil.createOutput(stateDir, tmpFileName, IOContext.DEFAULT)) { + try (IndexOutput out = EndiannessReverserUtil.createOutput(stateDir, tmpFileName, IOContext.READONCE)) { CodecUtil.writeHeader(out, STATE_FILE_CODEC, STATE_FILE_VERSION); out.writeInt(FORMAT.index()); try (XContentBuilder builder = newXContentBuilder(FORMAT, new IndexOutputOutputStream(out) { @@ -155,7 +155,7 @@ private static void copyStateToExtraLocations(List> state Directory extraStateDir = extraStatePathAndDir.v2(); try { deleteFileIfExists(extraStateLocation, extraStateDir, tmpFileName); - extraStateDir.copyFrom(srcStateDir, tmpFileName, tmpFileName, IOContext.DEFAULT); + extraStateDir.copyFrom(srcStateDir, tmpFileName, tmpFileName, IOContext.READONCE); extraStateDir.sync(Collections.singleton(tmpFileName)); } catch (Exception e) { throw new WriteStateException(false, "failed to copy tmp state file to extra location " + extraStateLocation, e); @@ -309,7 +309,7 @@ protected XContentBuilder newXContentBuilder(XContentType type, OutputStream str */ public final T read(NamedXContentRegistry namedXContentRegistry, Path file) throws IOException { try (Directory dir = newDirectory(file.getParent())) { - try (IndexInput indexInput = EndiannessReverserUtil.openInput(dir, file.getFileName().toString(), IOContext.DEFAULT)) { + try (IndexInput indexInput = EndiannessReverserUtil.openInput(dir, file.getFileName().toString(), IOContext.READONCE)) { // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here. CodecUtil.checksumEntireFile(indexInput); CodecUtil.checkHeader(indexInput, STATE_FILE_CODEC, MIN_COMPATIBLE_STATE_FILE_VERSION, STATE_FILE_VERSION); diff --git a/server/src/main/java/org/opensearch/index/translog/Checkpoint.java b/server/src/main/java/org/opensearch/index/translog/Checkpoint.java index d309564ef5d32..8d63fab3c8c31 100644 --- a/server/src/main/java/org/opensearch/index/translog/Checkpoint.java +++ b/server/src/main/java/org/opensearch/index/translog/Checkpoint.java @@ -201,7 +201,7 @@ public String toString() { public static Checkpoint read(Path path) throws IOException { try (Directory dir = new NIOFSDirectory(path.getParent())) { - try (IndexInput indexInput = dir.openInput(path.getFileName().toString(), IOContext.DEFAULT)) { + try (IndexInput indexInput = dir.openInput(path.getFileName().toString(), IOContext.READONCE)) { // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here. CodecUtil.checksumEntireFile(indexInput); final int fileVersion = CodecUtil.checkHeader(indexInput, CHECKPOINT_CODEC, VERSION_LUCENE_BIG_ENDIAN, CURRENT_VERSION); From 6da0340206adc67c7ec2d7cd9e3d244ee39c4440 Mon Sep 17 00:00:00 2001 From: "Ilmar S. Habibulin" <161701313+ngr-ilmarh@users.noreply.github.com> Date: Tue, 8 Apr 2025 21:47:24 +0300 Subject: [PATCH 161/550] Update WorkerBulkByScrollTaskState.java (#17840) Signed-off-by: Ilmar S. 
Habibulin <161701313+ngr-ilmarh@users.noreply.github.com> --- .../opensearch/index/reindex/WorkerBulkByScrollTaskState.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskState.java b/server/src/main/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskState.java index c9661c6e00c64..49467fb86f95a 100644 --- a/server/src/main/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskState.java +++ b/server/src/main/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskState.java @@ -296,7 +296,6 @@ DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) { return this; } - long remainingDelay = scheduled.getDelay(TimeUnit.NANOSECONDS); // Actually reschedule the task if (scheduled == null || false == scheduled.cancel()) { // Couldn't cancel, probably because the task has finished or been scheduled. Either way we have nothing to do here. @@ -304,6 +303,7 @@ DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) { return this; } + long remainingDelay = scheduled.getDelay(TimeUnit.NANOSECONDS); /* Strangely enough getting here doesn't mean that you actually * cancelled the request, just that you probably did. If you stress * test it you'll find that requests sneak through. So each request From 5c7cced105b5fe3d50bfab32afe5aa242863274b Mon Sep 17 00:00:00 2001 From: shreyah963 Date: Tue, 8 Apr 2025 11:47:43 -0700 Subject: [PATCH 162/550] Add singleton optimization for DateHistogramAggregator (#17643) * Added Singleton DocValue Functionality Signed-off-by: shreyah963 * sync jvm.options Signed-off-by: shreyah963 * added override in getLeafCollector method Signed-off-by: shreyah963 * disabled jvm debug port used for testing Signed-off-by: shreyah963 * Fix code formatting issues Signed-off-by: shreyah963 * Update CHANGELOG.md Signed-off-by: shreyah963 * Restore Debug_OpenSearch.xml Signed-off-by: shreyah963 * restore Debug_OpenSearch.xml Signed-off-by: shreyah963 * Update CHANGELOG.md Co-authored-by: bowenlan-amzn Signed-off-by: shreyah963 * Move rounding calculation out of collectValue method to avoid redundant calculation in multi-valued case Signed-off-by: shreyah963 --------- Signed-off-by: shreyah963 Co-authored-by: bowenlan-amzn --- CHANGELOG.md | 1 + .../histogram/DateHistogramAggregator.java | 43 ++++++++++++++----- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 052322b202589..875e912f9ea19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) - Increase the floor segment size to 16MB ([#17699](https://github.com/opensearch-project/OpenSearch/pull/17699)) +- Unwrap singleton DocValues in date histogram aggregation. 
([#17643](https://github.com/opensearch-project/OpenSearch/pull/17643)) - Introduce 512 byte limit to search and ingest pipeline IDs ([#17786](https://github.com/opensearch-project/OpenSearch/pull/17786)) ### Dependencies diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index d825b33a0f150..3d935100fd0d1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -31,7 +31,9 @@ package org.opensearch.search.aggregations.bucket.histogram; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; @@ -201,13 +203,28 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol return LeafBucketCollector.NO_OP_COLLECTOR; } - SortedNumericDocValues values = valuesSource.longValues(ctx); + final SortedNumericDocValues values = valuesSource.longValues(ctx); + final NumericDocValues singleton = DocValues.unwrapSingleton(values); + + if (singleton != null) { + // Optimized path for single-valued fields + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + if (singleton.advanceExact(doc)) { + long value = singleton.longValue(); + collectValue(sub, doc, owningBucketOrd, preparedRounding.round(value)); + } + } + }; + } + + // Original path for multi-valued fields return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long owningBucketOrd) throws IOException { if (values.advanceExact(doc)) { int valuesCount = values.docValueCount(); - long previousRounded = Long.MIN_VALUE; for (int i = 0; i < valuesCount; ++i) { long value = values.nextValue(); @@ -216,15 +233,7 @@ public void collect(int doc, long owningBucketOrd) throws IOException { if (rounded == previousRounded) { continue; } - if (hardBounds == null || hardBounds.contain(rounded)) { - long bucketOrd = bucketOrds.add(owningBucketOrd, rounded); - if (bucketOrd < 0) { // already seen - bucketOrd = -1 - bucketOrd; - collectExistingBucket(sub, doc, bucketOrd); - } else { - collectBucket(sub, doc, bucketOrd); - } - } + collectValue(sub, doc, owningBucketOrd, rounded); previousRounded = rounded; } } @@ -232,6 +241,18 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }; } + private void collectValue(LeafBucketCollector sub, int doc, long owningBucketOrd, long rounded) throws IOException { + if (hardBounds == null || hardBounds.contain(rounded)) { + long bucketOrd = bucketOrds.add(owningBucketOrd, rounded); + if (bucketOrd < 0) { // already seen + bucketOrd = -1 - bucketOrd; + collectExistingBucket(sub, doc, bucketOrd); + } else { + collectBucket(sub, doc, bucketOrd); + } + } + } + private String fetchStarTreeCalendarUnit() { if (this.rounding.unit() == null) { return null; From 01e6ed415842592071a9e7eace2f15d9a8d0ee14 Mon Sep 17 00:00:00 2001 From: Gulshan <71965388+kumargu@users.noreply.github.com> Date: Wed, 9 Apr 2025 01:03:40 +0530 Subject: [PATCH 163/550] [Java Agent] Implement protection domain caching (#17832) Signed-off-by: 
Gulshan Kumar --- .../secure_sm/policy/PolicyFile.java | 42 ++++++++++++------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java index 183ca5222b017..eaae59f35c4aa 100644 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java @@ -33,9 +33,12 @@ import java.util.ArrayList; import java.util.Enumeration; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.PropertyPermission; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; @SuppressWarnings("removal") public class PolicyFile extends java.security.Policy { @@ -62,16 +65,17 @@ public PolicyFile(URL url) { } private PolicyInfo init(URL policy) throws PolicyInitializationException { - PolicyInfo info = new PolicyInfo(); + List entries = new ArrayList<>(); try (InputStreamReader reader = new InputStreamReader(getInputStream(policy), StandardCharsets.UTF_8)) { List grantEntries = PolicyParser.read(reader); for (GrantEntry grantEntry : grantEntries) { - addGrantEntry(grantEntry, info); + addGrantEntry(grantEntry, entries); } } catch (Exception e) { throw new PolicyInitializationException("Failed to load policy from: " + policy, e); } - return info; + + return new PolicyInfo(entries); } public static InputStream getInputStream(URL url) throws IOException { @@ -94,32 +98,30 @@ private CodeSource getCodeSource(GrantEntry grantEntry) throws PolicyInitializat } } - private void addGrantEntry(GrantEntry grantEntry, PolicyInfo newInfo) throws PolicyInitializationException { + private void addGrantEntry(GrantEntry grantEntry, List entries) throws PolicyInitializationException { CodeSource codesource = getCodeSource(grantEntry); if (codesource == null) { throw new PolicyInitializationException("Null CodeSource for: " + grantEntry.codeBase()); } List permissions = new ArrayList<>(); - List permissionList = grantEntry.permissionEntries(); - for (PermissionEntry pe : permissionList) { + for (PermissionEntry pe : grantEntry.permissionEntries()) { final PermissionEntry expandedEntry = expandPermissionName(pe); try { Optional perm = getInstance(expandedEntry.permission(), expandedEntry.name(), expandedEntry.action()); - if (perm.isPresent()) { - permissions.add(perm.get()); - } + perm.ifPresent(permissions::add); } catch (ClassNotFoundException e) { // these were mostly custom permission classes added for security // manager. Since security manager is deprecated, we can skip these // permissions classes. 
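The core of this change is a per-ProtectionDomain cache of resolved permissions, added a little further down in this diff (`pdMapping` plus `getOrCompute`). Reduced to its essentials, the caching shape is roughly the following; the standalone class and the spelled-out generic types are illustrative assumptions, while the field and method names mirror the patch.

    import java.security.Permission;
    import java.security.PermissionCollection;
    import java.security.ProtectionDomain;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    final class DomainPermissionCache {
        // Each ProtectionDomain gets its PermissionCollection computed at most once;
        // later implies() checks are served straight from the map.
        private final Map<ProtectionDomain, PermissionCollection> pdMapping = new ConcurrentHashMap<>();

        PermissionCollection getOrCompute(ProtectionDomain pd, Function<ProtectionDomain, PermissionCollection> computeFn) {
            return pdMapping.computeIfAbsent(pd, k -> computeFn.apply(k));
        }

        boolean implies(ProtectionDomain pd, Permission p, Function<ProtectionDomain, PermissionCollection> computeFn) {
            if (pd == null || p == null) {
                return false;
            }
            PermissionCollection pc = getOrCompute(pd, computeFn);
            return pc != null && pc.implies(p);
        }
    }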
if (PERM_CLASSES_TO_SKIP.contains(pe.permission())) { - continue; // skip this permission + continue; } throw new PolicyInitializationException("Permission class not found: " + pe.permission(), e); } } - newInfo.policyEntries.add(new PolicyEntry(codesource, permissions)); + + entries.add(new PolicyEntry(codesource, permissions)); } private static PermissionEntry expandPermissionName(PermissionEntry pe) { @@ -180,7 +182,11 @@ public void refresh() { @Override public boolean implies(ProtectionDomain pd, Permission p) { - PermissionCollection pc = getPermissions(pd); + if (pd == null || p == null) { + return false; + } + + PermissionCollection pc = policyInfo.getOrCompute(pd, this::getPermissions); return pc != null && pc.implies(p); } @@ -307,10 +313,16 @@ public String toString() { } private static class PolicyInfo { - final List policyEntries; + private final List policyEntries; + private final Map pdMapping; + + PolicyInfo(List entries) { + this.policyEntries = List.copyOf(entries); // an immutable copy for thread safety. + this.pdMapping = new ConcurrentHashMap<>(); + } - PolicyInfo() { - policyEntries = new ArrayList<>(); + public PermissionCollection getOrCompute(ProtectionDomain pd, Function computeFn) { + return pdMapping.computeIfAbsent(pd, k -> computeFn.apply(k)); } } From 374ad774dc8f443f400e0f059e83d672066c8a33 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Tue, 8 Apr 2025 12:34:52 -0700 Subject: [PATCH 164/550] Support `_close` and `_open` of an index in `search_only` mode (#17844) --- .../metadata/MetadataIndexStateServiceIT.java | 128 ++++++++++++++++++ .../action/support/ActiveShardCount.java | 7 +- .../metadata/MetadataIndexStateService.java | 2 +- .../cluster/routing/IndexRoutingTable.java | 2 +- 4 files changed, 135 insertions(+), 4 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java new file mode 100644 index 0000000000000..026ee43454c07 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java @@ -0,0 +1,128 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.open.OpenIndexResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.concurrent.TimeUnit; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class MetadataIndexStateServiceIT extends RemoteStoreBaseIntegTestCase { + + private static final String TEST_INDEX = "test_open_close_index"; + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); + } + + public void testIndexCloseAndOpen() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + Settings specificSettings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1).build(); + + createIndex(TEST_INDEX, specificSettings); + ensureGreen(TEST_INDEX); + + for (int i = 0; i < 10; i++) { + client().prepareIndex(TEST_INDEX) + .setId(Integer.toString(i)) + .setSource("field1", "value" + i) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + } + + assertAcked(client().admin().indices().prepareClose(TEST_INDEX).get()); + + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(TEST_INDEX); + assertEquals(IndexMetadata.State.CLOSE, indexMetadata.getState()); + + OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen(TEST_INDEX).get(); + + assertTrue("Open operation should be acknowledged", openIndexResponse.isAcknowledged()); + assertTrue("Open operation shards should be acknowledged", openIndexResponse.isShardsAcknowledged()); + + clusterStateResponse = client().admin().cluster().prepareState().get(); + indexMetadata = clusterStateResponse.getState().metadata().index(TEST_INDEX); + assertEquals(IndexMetadata.State.OPEN, indexMetadata.getState()); + + assertBusy(() -> { + SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).get(); + assertHitCount(searchResponse, 10); + }, 30, TimeUnit.SECONDS); + } + + public void testIndexCloseAndOpenWithSearchOnlyMode() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + internalCluster().startSearchOnlyNodes(1); + + Settings specificSettings = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + + createIndex(TEST_INDEX, specificSettings); + 
ensureGreen(TEST_INDEX); + + for (int i = 0; i < 10; i++) { + client().prepareIndex(TEST_INDEX) + .setId(Integer.toString(i)) + .setSource("field1", "value" + i) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + } + + assertAcked(client().admin().indices().prepareScaleSearchOnly(TEST_INDEX, true).get()); + ensureGreen(TEST_INDEX); + + GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings(TEST_INDEX).get(); + assertTrue(settingsResponse.getSetting(TEST_INDEX, IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey()).equals("true")); + + assertAcked(client().admin().indices().prepareClose(TEST_INDEX).get()); + + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(TEST_INDEX); + assertEquals(IndexMetadata.State.CLOSE, indexMetadata.getState()); + + OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen(TEST_INDEX).get(); + + assertTrue("Open operation should be acknowledged", openIndexResponse.isAcknowledged()); + assertTrue("Open operation shards should be acknowledged", openIndexResponse.isShardsAcknowledged()); + + clusterStateResponse = client().admin().cluster().prepareState().get(); + indexMetadata = clusterStateResponse.getState().metadata().index(TEST_INDEX); + assertEquals(IndexMetadata.State.OPEN, indexMetadata.getState()); + + settingsResponse = client().admin().indices().prepareGetSettings(TEST_INDEX).get(); + assertTrue(settingsResponse.getSetting(TEST_INDEX, IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey()).equals("true")); + + assertBusy(() -> { + SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).get(); + assertHitCount(searchResponse, 10); + }, 30, TimeUnit.SECONDS); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java index e91342a7ce4b8..ad4d99929cbfd 100644 --- a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java +++ b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java @@ -178,9 +178,12 @@ public boolean enoughShardsActive(final ClusterState clusterState, final String. 
continue; } assert indexRoutingTable != null; + if (indexRoutingTable.allPrimaryShardsActive() == false) { - // all primary shards aren't active yet - return false; + if (indexMetadata.getSettings().getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false) == false) { + // all primary shards aren't active yet + return false; + } } ActiveShardCount waitForActiveShards = this; if (waitForActiveShards == ActiveShardCount.DEFAULT) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java index f0a74653f22ce..5edee81ad0ec5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java @@ -675,7 +675,7 @@ private void sendVerifyShardBeforeCloseRequest( final ActionListener listener ) { final ShardId shardId = shardRoutingTable.shardId(); - if (shardRoutingTable.primaryShard().unassigned()) { + if (shardRoutingTable.primaryShard() == null || shardRoutingTable.primaryShard().unassigned()) { logger.debug("primary shard {} is unassigned, ignoring", shardId); final ReplicationResponse response = new ReplicationResponse(); response.setShardInfo(new ReplicationResponse.ShardInfo(shardRoutingTable.size(), shardRoutingTable.size())); diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index d1bb689672cca..971c8ef95282e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -275,7 +275,7 @@ public boolean allPrimaryShardsActive() { public int primaryShardsActive() { int counter = 0; for (IndexShardRoutingTable shardRoutingTable : this) { - if (shardRoutingTable.primaryShard().active()) { + if (shardRoutingTable.primaryShard() != null && shardRoutingTable.primaryShard().active()) { counter++; } } From 6f2bc8ed91b001c8a8536ef1214eab3f66e591b5 Mon Sep 17 00:00:00 2001 From: "Ilmar S. Habibulin" <161701313+ngr-ilmarh@users.noreply.github.com> Date: Tue, 8 Apr 2025 22:44:38 +0300 Subject: [PATCH 165/550] Update StartupException.java (#17838) Signed-off-by: Ilmar S. 
Habibulin <161701313+ngr-ilmarh@users.noreply.github.com> --- .../java/org/opensearch/bootstrap/StartupException.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/bootstrap/StartupException.java b/server/src/main/java/org/opensearch/bootstrap/StartupException.java index 9ad89ab518006..5e40b47e4fa02 100644 --- a/server/src/main/java/org/opensearch/bootstrap/StartupException.java +++ b/server/src/main/java/org/opensearch/bootstrap/StartupException.java @@ -85,10 +85,10 @@ private void printStackTrace(Consumer consumer) { cause = getFirstGuiceCause((CreationException) cause); } - String message = cause.toString(); - consumer.accept(message); - if (cause != null) { + String message = cause.toString(); + consumer.accept(message); + // walk to the root cause while (cause.getCause() != null) { cause = cause.getCause(); From 427f37b925e221187748342c6b899fbb26368438 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Tue, 8 Apr 2025 13:20:33 -0700 Subject: [PATCH 166/550] Fix ScaleIndexIT flaky test (#17848) Signed-off-by: Prudhvi Godithi --- .../admin/indices/scale/searchonly/ScaleIndexIT.java | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java index 3e22084db96d8..06c6c6074fc47 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java @@ -148,11 +148,7 @@ public void testFullSearchOnlyReplicasFullLifecycle() throws Exception { // Test cluster health when one search replica is down internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodesWithSearchOnlyReplicas()[0])); - assertEquals( - "Index health should be YELLOW with one search replica down", - "YELLOW", - client().admin().cluster().prepareHealth(TEST_INDEX).get().getStatus().name() - ); + ensureYellow(TEST_INDEX); // Start a replacement search node and wait for recovery internalCluster().startSearchOnlyNode(); @@ -332,14 +328,12 @@ private String[] findNodesWithSearchOnlyReplicas() { ClusterState state = client().admin().cluster().prepareState().get().getState(); IndexRoutingTable indexRoutingTable = state.routingTable().index(TEST_INDEX); - // Use a set to avoid duplicates if multiple shards are on the same node Set nodeNames = new HashSet<>(); for (IndexShardRoutingTable shardTable : indexRoutingTable) { for (ShardRouting searchReplica : shardTable.searchOnlyReplicas()) { if (searchReplica.active()) { - String nodeId = searchReplica.currentNodeId(); - nodeNames.add(state.nodes().get(nodeId).getName()); + nodeNames.add(state.nodes().get(searchReplica.currentNodeId()).getName()); } } } From 0d86ac128e0fe81038ed0b57c74425a41736ce1e Mon Sep 17 00:00:00 2001 From: Yupeng Fu Date: Tue, 8 Apr 2025 13:40:49 -0700 Subject: [PATCH 167/550] Add a builder to ingestion source (#17814) Signed-off-by: Yupeng Fu --- .../cluster/metadata/IndexMetadata.java | 5 +- .../cluster/metadata/IngestionSource.java | 60 +++++++++++++++++-- .../metadata/IngestionSourceTests.java | 40 ++++++++++--- 3 files changed, 91 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java 
b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 8782e06e68419..7cdfba563f25f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -1047,7 +1047,10 @@ public IngestionSource getIngestionSource() { final IngestionErrorStrategy.ErrorStrategy errorStrategy = INGESTION_SOURCE_ERROR_STRATEGY_SETTING.get(settings); final Map ingestionSourceParams = INGESTION_SOURCE_PARAMS_SETTING.getAsMap(settings); - return new IngestionSource(ingestionSourceType, pointerInitReset, errorStrategy, ingestionSourceParams); + return new IngestionSource.Builder(ingestionSourceType).setParams(ingestionSourceParams) + .setPointerInitReset(pointerInitReset) + .setErrorStrategy(errorStrategy) + .build(); } return null; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java index fd28acf3246ad..b8ffa890ce519 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java @@ -12,6 +12,7 @@ import org.opensearch.indices.pollingingest.IngestionErrorStrategy; import org.opensearch.indices.pollingingest.StreamPoller; +import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -20,12 +21,12 @@ */ @ExperimentalApi public class IngestionSource { - private String type; - private PointerInitReset pointerInitReset; - private IngestionErrorStrategy.ErrorStrategy errorStrategy; - private Map params; + private final String type; + private final PointerInitReset pointerInitReset; + private final IngestionErrorStrategy.ErrorStrategy errorStrategy; + private final Map params; - public IngestionSource( + private IngestionSource( String type, PointerInitReset pointerInitReset, IngestionErrorStrategy.ErrorStrategy errorStrategy, @@ -125,4 +126,53 @@ public String toString() { return "PointerInitReset{" + "type='" + type + '\'' + ", value=" + value + '}'; } } + + /** + * Builder for {@link IngestionSource}. 
+ * + */ + @ExperimentalApi + public static class Builder { + private String type; + private PointerInitReset pointerInitReset; + private IngestionErrorStrategy.ErrorStrategy errorStrategy; + private Map params; + + public Builder(String type) { + this.type = type; + this.params = new HashMap<>(); + } + + public Builder(IngestionSource ingestionSource) { + this.type = ingestionSource.type; + this.pointerInitReset = ingestionSource.pointerInitReset; + this.errorStrategy = ingestionSource.errorStrategy; + this.params = ingestionSource.params; + } + + public Builder setPointerInitReset(PointerInitReset pointerInitReset) { + this.pointerInitReset = pointerInitReset; + return this; + } + + public Builder setErrorStrategy(IngestionErrorStrategy.ErrorStrategy errorStrategy) { + this.errorStrategy = errorStrategy; + return this; + } + + public Builder setParams(Map params) { + this.params = params; + return this; + } + + public Builder addParam(String key, Object value) { + this.params.put(key, value); + return this; + } + + public IngestionSource build() { + return new IngestionSource(type, pointerInitReset, errorStrategy, params); + } + + } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java index 05037f33c3965..1e24c5f7df4a0 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java @@ -26,7 +26,10 @@ public class IngestionSourceTests extends OpenSearchTestCase { public void testConstructorAndGetters() { Map params = new HashMap<>(); params.put("key", "value"); - IngestionSource source = new IngestionSource("type", pointerInitReset, DROP, params); + IngestionSource source = new IngestionSource.Builder("type").setParams(params) + .setPointerInitReset(pointerInitReset) + .setErrorStrategy(DROP) + .build(); assertEquals("type", source.getType()); assertEquals(StreamPoller.ResetState.REWIND_BY_OFFSET, source.getPointerInitReset().getType()); @@ -38,36 +41,57 @@ public void testConstructorAndGetters() { public void testEquals() { Map params1 = new HashMap<>(); params1.put("key", "value"); - IngestionSource source1 = new IngestionSource("type", pointerInitReset, DROP, params1); + IngestionSource source1 = new IngestionSource.Builder("type").setParams(params1) + .setPointerInitReset(pointerInitReset) + .setErrorStrategy(DROP) + .build(); Map params2 = new HashMap<>(); params2.put("key", "value"); - IngestionSource source2 = new IngestionSource("type", pointerInitReset, DROP, params2); + IngestionSource source2 = new IngestionSource.Builder("type").setParams(params2) + .setPointerInitReset(pointerInitReset) + .setErrorStrategy(DROP) + .build(); assertTrue(source1.equals(source2)); assertTrue(source2.equals(source1)); - IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, DROP, params1); + IngestionSource source3 = new IngestionSource.Builder("differentType").setParams(params1) + .setPointerInitReset(pointerInitReset) + .setErrorStrategy(DROP) + .build(); assertFalse(source1.equals(source3)); } public void testHashCode() { Map params1 = new HashMap<>(); params1.put("key", "value"); - IngestionSource source1 = new IngestionSource("type", pointerInitReset, DROP, params1); + IngestionSource source1 = new IngestionSource.Builder("type").setParams(params1) + .setPointerInitReset(pointerInitReset) + .setErrorStrategy(DROP) + .build(); Map 
params2 = new HashMap<>(); params2.put("key", "value"); - IngestionSource source2 = new IngestionSource("type", pointerInitReset, DROP, params2); + IngestionSource source2 = new IngestionSource.Builder("type").setParams(params2) + .setPointerInitReset(pointerInitReset) + .setErrorStrategy(DROP) + .build(); assertEquals(source1.hashCode(), source2.hashCode()); - IngestionSource source3 = new IngestionSource("differentType", pointerInitReset, DROP, params1); + IngestionSource source3 = new IngestionSource.Builder("differentType").setParams(params1) + .setPointerInitReset(pointerInitReset) + .setErrorStrategy(DROP) + .build(); assertNotEquals(source1.hashCode(), source3.hashCode()); } public void testToString() { Map params = new HashMap<>(); params.put("key", "value"); - IngestionSource source = new IngestionSource("type", pointerInitReset, DROP, params); + IngestionSource source = new IngestionSource.Builder("type").setParams(params) + .setPointerInitReset(pointerInitReset) + .setErrorStrategy(DROP) + .build(); String expected = "IngestionSource{type='type',pointer_init_reset='PointerInitReset{type='REWIND_BY_OFFSET', value=1000}',error_strategy='DROP', params={key=value}}"; assertEquals(expected, source.toString()); From 38231693572bd00363db1b0221a0bc632c4ceef2 Mon Sep 17 00:00:00 2001 From: shreyah963 Date: Tue, 8 Apr 2025 14:31:17 -0700 Subject: [PATCH 168/550] [Aggregations] Optimize singleton handling in GlobalOrdinalValuesSource (#17740) * added singleton optimization path to globalordinalvaluesource Signed-off-by: shreyah963 * enabled remote debugging Signed-off-by: shreyah963 * Removed the minimum cap to handle larger ordinal values Signed-off-by: shreyah963 * emove redundant singleton optimization state from SingleDimensionValuesSource and simplify the optimization logic in GlobalOrdinalValuesSource. The singleton optimization is now only applied when DocValues.unwrapSingleton() succeeds, preventing array index out of bounds errors with high cardinality fields. 
Signed-off-by: shreyah963 * removed redundant initialization Signed-off-by: shreyah963 * reverted the array allocation in the constructer to its original form Signed-off-by: shreyah963 * [Docs] Add detailed comments to GlobalOrdinalValuesSource collector Signed-off-by: shreyah963 * Remote redundant imports and disable remote debugging Signed-off-by: shreyah963 * replaced wildcard import with only necessary imports Signed-off-by: shreyah963 * Update CHANGELOG.md Signed-off-by: shreyah963 * Update CHANGELOG.md Co-authored-by: bowenlan-amzn Signed-off-by: shreyah963 * Remove redundant comments from GlobalOrdinalValuesSource Signed-off-by: shreyah963 --------- Signed-off-by: shreyah963 Signed-off-by: Ankit Jain Co-authored-by: bowenlan-amzn Co-authored-by: Ankit Jain --- CHANGELOG.md | 1 + .../composite/GlobalOrdinalValuesSource.java | 22 +++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 875e912f9ea19..db706c7b6e641 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) - Increase the floor segment size to 16MB ([#17699](https://github.com/opensearch-project/OpenSearch/pull/17699)) +- Unwrap singleton DocValues in global ordinal value source of composite histogram aggregation ([#17740](https://github.com/opensearch-project/OpenSearch/pull/17740)) - Unwrap singleton DocValues in date histogram aggregation. ([#17643](https://github.com/opensearch-project/OpenSearch/pull/17643)) - Introduce 512 byte limit to search and ingest pipeline IDs ([#17786](https://github.com/opensearch-project/OpenSearch/pull/17786)) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java index 227dce543cfe9..ad1116d842360 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java @@ -32,8 +32,10 @@ package org.opensearch.search.aggregations.bucket.composite; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; @@ -171,6 +173,26 @@ LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollec if (lookup == null) { initLookup(dvs); } + + // unwrapSingleton() returns non-null only if the field is single-valued + final SortedDocValues singleton = DocValues.unwrapSingleton(dvs); + + // Direct ordinal access for single-valued fields + if (singleton != null) { + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + if (singleton.advanceExact(doc)) { + currentValue = singleton.ordValue(); + next.collect(doc, bucket); + } else if (missingBucket) { + currentValue = -1; + next.collect(doc, bucket); + } + } + }; + } + return new LeafBucketCollector() { @Override public void collect(int doc, long bucket) throws IOException { From 5ec6e9c7f6fce3cddbe78e1696553494cb52680c Mon Sep 17 00:00:00 2001 
From: Yupeng Fu Date: Tue, 8 Apr 2025 16:41:06 -0700 Subject: [PATCH 169/550] [Pull-based Ingestion] disable push-API for indexing in ingestionEngine (#17768) --------- Signed-off-by: Yupeng Fu --- CHANGELOG.md | 1 + .../opensearch/OpenSearchServerException.java | 8 +++++ .../index/engine/IngestionEngine.java | 12 ++++++- .../engine/IngestionEngineException.java | 36 +++++++++++++++++++ .../MessageProcessorRunnable.java | 2 +- .../ExceptionSerializationTests.java | 2 ++ .../index/engine/IngestionEngineTests.java | 9 +++++ 7 files changed, 68 insertions(+), 2 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/engine/IngestionEngineException.java diff --git a/CHANGELOG.md b/CHANGELOG.md index db706c7b6e641..1b8d0e3b4405c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added scale to zero (`search_only` mode) support for OpenSearch reader writer separation ([#17299](https://github.com/opensearch-project/OpenSearch/pull/17299) - [Star Tree] [Search] Resolving numeric range aggregation with metric aggregation using star-tree ([#17273](https://github.com/opensearch-project/OpenSearch/pull/17273)) - Added Search Only strict routing setting ([#17803](https://github.com/opensearch-project/OpenSearch/pull/17803)) +- Disable the index API for ingestion engine ([#17768](https://github.com/opensearch-project/OpenSearch/pull/17768)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/main/java/org/opensearch/OpenSearchServerException.java b/server/src/main/java/org/opensearch/OpenSearchServerException.java index 695fe4dbac767..247a23dc4bd57 100644 --- a/server/src/main/java/org/opensearch/OpenSearchServerException.java +++ b/server/src/main/java/org/opensearch/OpenSearchServerException.java @@ -1224,5 +1224,13 @@ public static void registerExceptions() { V_3_0_0 ) ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.index.engine.IngestionEngineException.class, + org.opensearch.index.engine.IngestionEngineException::new, + 176, + V_3_0_0 + ) + ); } } diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index 1d5d104394558..4839b9ceb463b 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -145,6 +145,16 @@ protected Set fetchPersistedOffsets(DirectoryReader direc @Override public IndexResult index(Index index) throws IOException { + throw new IngestionEngineException("push-based indexing is not supported in ingestion engine, use streaming source instead"); + } + + /** + * Indexes the document into the engine. This is used internally by the stream poller only. 
+ * @param index the index request + * @return the index result + * @throws IOException if an error occurs + */ + public IndexResult indexInternal(Index index) throws IOException { assert Objects.equals(index.uid().field(), IdFieldMapper.NAME) : index.uid().field(); ensureOpen(); final IndexResult indexResult; @@ -168,7 +178,7 @@ private void addDocs(final List docs, final IndexWriter i @Override public DeleteResult delete(Delete delete) throws IOException { - return null; + throw new IngestionEngineException("push-based deletion is not supported in ingestion engine, use streaming source instead"); } @Override diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngineException.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngineException.java new file mode 100644 index 0000000000000..9bf53d35bacf6 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngineException.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.engine; + +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchWrapperException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; + +import java.io.IOException; + +/** + * Exception thrown when there is an error in the ingestion engine. + * + * @opensearch.internal + */ +public class IngestionEngineException extends OpenSearchException implements OpenSearchWrapperException { + public IngestionEngineException(String message) { + super(message); + } + + public IngestionEngineException(StreamInput in) throws IOException { + super(in); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java index 2066f348243b8..23aa1a043d774 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java @@ -119,7 +119,7 @@ protected void process(Message message, IngestionShardPointer pointer) { Engine.Operation operation = getOperation(payload, pointer); switch (operation.operationType()) { case INDEX: - engine.index((Engine.Index) operation); + engine.indexInternal((Engine.Index) operation); break; case DELETE: engine.delete((Engine.Delete) operation); diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index dd55abb65d19f..9773a0dcd16a0 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -87,6 +87,7 @@ import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.crypto.CryptoRegistryException; import org.opensearch.env.ShardLockObtainFailedException; +import org.opensearch.index.engine.IngestionEngineException; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.query.QueryShardException; import org.opensearch.index.seqno.RetentionLeaseAlreadyExistsException; @@ -896,6 +897,7 @@ public void testIds() { ids.put(173, 
ViewAlreadyExistsException.class); ids.put(174, InvalidIndexContextException.class); ids.put(175, ResponseLimitBreachedException.class); + ids.put(176, IngestionEngineException.class); ids.put(10001, IndexCreateBlockException.class); Map, Integer> reverse = new HashMap<>(); diff --git a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java index 8f84f59cfbccc..d8c5ebb16a36a 100644 --- a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java @@ -35,6 +35,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import org.mockito.Mockito; + import static org.awaitility.Awaitility.await; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; @@ -128,6 +130,13 @@ public void testRecovery() throws IOException { waitForResults(ingestionEngine, 4); } + public void testPushAPIFailures() { + Engine.Index indexMock = Mockito.mock(Engine.Index.class); + assertThrows(IngestionEngineException.class, () -> ingestionEngine.index(indexMock)); + Engine.Delete deleteMock = Mockito.mock(Engine.Delete.class); + assertThrows(IngestionEngineException.class, () -> ingestionEngine.delete(deleteMock)); + } + public void testCreationFailure() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); FakeIngestionSource.FakeIngestionConsumerFactory consumerFactory = new FakeIngestionSource.FakeIngestionConsumerFactory(messages); From 027551e9a1c487e8ff0ad2f96c8258144e6fe6b9 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 8 Apr 2025 20:16:06 -0400 Subject: [PATCH 170/550] Add the support of trusted file systems to AgentPolicy (#17852) Signed-off-by: Andriy Redko --- .../opensearch/javaagent/FileInterceptor.java | 19 ++++++++++++++----- .../opensearch/javaagent/AgentTestCase.java | 6 +++++- .../javaagent/bootstrap/AgentPolicy.java | 15 ++++++++++++++- 3 files changed, 33 insertions(+), 7 deletions(-) diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java index fb3d66d4524cf..e1ceddee23ca4 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java @@ -16,6 +16,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardOpenOption; +import java.nio.file.spi.FileSystemProvider; import java.security.Policy; import java.security.ProtectionDomain; import java.util.Collection; @@ -46,17 +47,23 @@ public static void intercept(@Advice.AllArguments Object[] args, @Advice.Origin return; /* noop */ } + FileSystemProvider provider = null; String filePath = null; if (args.length > 0 && args[0] instanceof String pathStr) { filePath = Paths.get(pathStr).toAbsolutePath().toString(); } else if (args.length > 0 && args[0] instanceof Path path) { filePath = path.toAbsolutePath().toString(); + provider = path.getFileSystem().provider(); } if (filePath == null) { return; // No valid file path found } + if (provider != null && AgentPolicy.isTrustedFileSystem(provider.getScheme()) == true) { + return; + } + final StackWalker walker = StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE); final Collection callers = 
walker.walk(StackCallerProtectionDomainChainExtractor.INSTANCE); @@ -66,7 +73,7 @@ public static void intercept(@Advice.AllArguments Object[] args, @Advice.Origin String targetFilePath = null; if (isMutating == false && isDelete == false) { - if (name.equals("newByteChannel") == true) { + if (name.equals("newByteChannel") == true || name.equals("open") == true) { if (args.length > 1 && args[1] instanceof OpenOption[] opts) { for (final OpenOption opt : opts) { if (opt != StandardOpenOption.READ) { @@ -89,20 +96,22 @@ public static void intercept(@Advice.AllArguments Object[] args, @Advice.Origin for (final ProtectionDomain domain : callers) { // Handle FileChannel.open() separately to check read/write permissions properly if (method.getName().equals("open")) { - if (!policy.implies(domain, new FilePermission(filePath, "read,write"))) { - throw new SecurityException("Denied OPEN access to file: " + filePath + ", domain: " + domain); + if (isMutating == true && !policy.implies(domain, new FilePermission(filePath, "read,write"))) { + throw new SecurityException("Denied OPEN (read/write) access to file: " + filePath + ", domain: " + domain); + } else if (!policy.implies(domain, new FilePermission(filePath, "read"))) { + throw new SecurityException("Denied OPEN (read) access to file: " + filePath + ", domain: " + domain); } } // Handle Files.copy() separately to check read/write permissions properly if (method.getName().equals("copy")) { if (!policy.implies(domain, new FilePermission(filePath, "read"))) { - throw new SecurityException("Denied OPEN access to file: " + filePath + ", domain: " + domain); + throw new SecurityException("Denied COPY (read) access to file: " + filePath + ", domain: " + domain); } if (targetFilePath != null) { if (!policy.implies(domain, new FilePermission(targetFilePath, "write"))) { - throw new SecurityException("Denied OPEN access to file: " + targetFilePath + ", domain: " + domain); + throw new SecurityException("Denied COPY (write) access to file: " + targetFilePath + ", domain: " + domain); } } } diff --git a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTestCase.java b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTestCase.java index 1cfc97b6352fd..f15d310b8f388 100644 --- a/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTestCase.java +++ b/libs/agent-sm/agent/src/test/java/org/opensearch/javaagent/AgentTestCase.java @@ -19,6 +19,10 @@ public abstract class AgentTestCase { @BeforeClass public static void setUp() { AgentPolicy.setPolicy(new Policy() { - }, Set.of(), (caller, chain) -> caller.getName().equalsIgnoreCase("worker.org.gradle.process.internal.worker.GradleWorkerMain")); + }, + Set.of(), + Set.of(), + (caller, chain) -> caller.getName().equalsIgnoreCase("worker.org.gradle.process.internal.worker.GradleWorkerMain") + ); } } diff --git a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java index c54a7296cbebe..44042efc7f73c 100644 --- a/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java +++ b/libs/agent-sm/bootstrap/src/main/java/org/opensearch/javaagent/bootstrap/AgentPolicy.java @@ -30,6 +30,7 @@ public class AgentPolicy { private static final Logger LOGGER = Logger.getLogger(AgentPolicy.class.getName()); private static volatile Policy policy; private static volatile Set trustedHosts; + private static volatile Set trustedFileSystems; 
private static volatile BiFunction, Collection>, Boolean> classesThatCanExit; /** @@ -124,23 +125,26 @@ private AgentPolicy() {} * @param policy policy */ public static void setPolicy(Policy policy) { - setPolicy(policy, Set.of(), new NoneCanExit()); + setPolicy(policy, Set.of(), Set.of(), new NoneCanExit()); } /** * Set Agent policy * @param policy policy * @param trustedHosts trusted hosts + * @param trustedFileSystems trusted file systems * @param classesThatCanExit classed that are allowed to call {@link System#exit}, {@link Runtime#halt} */ public static void setPolicy( Policy policy, final Set trustedHosts, + final Set trustedFileSystems, final BiFunction, Collection>, Boolean> classesThatCanExit ) { if (AgentPolicy.policy == null) { AgentPolicy.policy = policy; AgentPolicy.trustedHosts = Collections.unmodifiableSet(trustedHosts); + AgentPolicy.trustedFileSystems = Collections.unmodifiableSet(trustedFileSystems); AgentPolicy.classesThatCanExit = classesThatCanExit; LOGGER.info("Policy attached successfully: " + policy); } else { @@ -182,6 +186,15 @@ public static boolean isTrustedHost(String hostname) { return AgentPolicy.trustedHosts.contains(hostname); } + /** + * Check if file system is trusted + * @param fileSystem file system + * @return is trusted or not + */ + public static boolean isTrustedFileSystem(String fileSystem) { + return AgentPolicy.trustedFileSystems.contains(fileSystem); + } + /** * Check if class is allowed to call {@link System#exit}, {@link Runtime#halt} * @param caller caller class From 405586c776ff13d813b2fa846fe756cafdad3f78 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Tue, 8 Apr 2025 19:16:54 -0700 Subject: [PATCH 171/550] Remove deprecated batch_size parameter from _bulk (#17801) --- CHANGELOG.md | 1 + .../org/opensearch/ingest/IngestClientIT.java | 12 -- .../opensearch/action/bulk/BulkRequest.java | 30 +-- .../action/bulk/TransportBulkAction.java | 3 +- .../org/opensearch/ingest/IngestService.java | 11 +- .../rest/action/document/RestBulkAction.java | 7 - .../bulk/TransportBulkActionIngestTests.java | 28 +-- .../opensearch/ingest/IngestServiceTests.java | 179 +++--------------- 8 files changed, 44 insertions(+), 227 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b8d0e3b4405c..74de495f327f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Deprecated ### Removed +- Remove deprecated `batch_size` parameter from `_bulk` ([#14283](https://github.com/opensearch-project/OpenSearch/issues/14283)) ### Fixed - Fix bytes parameter on `_cat/recovery` ([#17598](https://github.com/opensearch-project/OpenSearch/pull/17598)) diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java index 660f3188b51d4..eebfc7805114f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java @@ -162,14 +162,6 @@ public void testSimulate() throws Exception { } public void testBulkWithIngestFailures() throws Exception { - runBulkTestWithRandomDocs(false); - } - - public void testBulkWithIngestFailuresWithBatchSize() throws Exception { - runBulkTestWithRandomDocs(true); - } - - private void runBulkTestWithRandomDocs(boolean shouldSetBatchSize) throws Exception { createIndex("index"); BytesReference source = BytesReference.bytes( @@ -188,9 +180,6 @@ 
private void runBulkTestWithRandomDocs(boolean shouldSetBatchSize) throws Except int numRequests = scaledRandomIntBetween(32, 128); BulkRequest bulkRequest = new BulkRequest(); - if (shouldSetBatchSize) { - bulkRequest.batchSize(scaledRandomIntBetween(2, numRequests)); - } for (int i = 0; i < numRequests; i++) { IndexRequest indexRequest = new IndexRequest("index").id(Integer.toString(i)).setPipeline("_id"); indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", i % 2 == 0); @@ -244,7 +233,6 @@ public void testBulkWithIngestFailuresAndDropBatch() throws Exception { client().admin().cluster().putPipeline(putPipelineRequest).get(); BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.batchSize(3); bulkRequest.add( new IndexRequest("index").id("_fail").setPipeline("_id").source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", true) ); diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java index c8842e25bc907..84d1bee38fb47 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java @@ -97,7 +97,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques private String globalRouting; private String globalIndex; private Boolean globalRequireAlias; - private int batchSize = Integer.MAX_VALUE; private long sizeInBytes = 0; @@ -109,8 +108,8 @@ public BulkRequest(StreamInput in) throws IOException { requests.addAll(in.readList(i -> DocWriteRequest.readDocumentRequest(null, i))); refreshPolicy = RefreshPolicy.readFrom(in); timeout = in.readTimeValue(); - if (in.getVersion().onOrAfter(Version.V_2_14_0)) { - batchSize = in.readInt(); + if (in.getVersion().onOrAfter(Version.V_2_14_0) && in.getVersion().before(Version.V_3_0_0)) { + in.readInt(); // formerly batch_size } } @@ -351,27 +350,6 @@ public final BulkRequest timeout(TimeValue timeout) { return this; } - /** - * Set batch size - * @param size batch size from input - * @return {@link BulkRequest} - */ - public BulkRequest batchSize(int size) { - if (size < 1) { - throw new IllegalArgumentException("batch_size must be greater than 0"); - } - this.batchSize = size; - return this; - } - - /** - * Get batch size - * @return batch size - */ - public int batchSize() { - return this.batchSize; - } - /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: @@ -479,8 +457,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(requests, DocWriteRequest::writeDocumentRequest); refreshPolicy.writeTo(out); out.writeTimeValue(timeout); - if (out.getVersion().onOrAfter(Version.V_2_14_0)) { - out.writeInt(batchSize); + if (out.getVersion().onOrAfter(Version.V_2_14_0) && out.getVersion().before(Version.V_3_0_0)) { + out.writeInt(Integer.MAX_VALUE); // formerly batch_size } } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index 50b45fd687083..4d9d9eb36c735 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -963,8 +963,7 @@ public boolean isForceExecution() { } }, bulkRequestModifier::markItemAsDropped, - executorName, - original + executorName ); } diff --git 
a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index 48697eafd3bbb..9f724f0d07ae5 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -40,7 +40,6 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.ResourceNotFoundException; import org.opensearch.action.DocWriteRequest; -import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.TransportBulkAction; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.ingest.DeletePipelineRequest; @@ -567,8 +566,7 @@ public void executeBulkRequest( BiConsumer onFailure, BiConsumer onCompletion, IntConsumer onDropped, - String executorName, - BulkRequest originalBulkRequest + String executorName ) { threadPool.executor(executorName).execute(new AbstractRunnable() { @@ -579,7 +577,7 @@ public void onFailure(Exception e) { @Override protected void doRun() { - runBulkRequestInBatch(numberOfActionRequests, actionRequests, onFailure, onCompletion, onDropped, originalBulkRequest); + runBulkRequestInBatch(numberOfActionRequests, actionRequests, onFailure, onCompletion, onDropped); } }); } @@ -589,8 +587,7 @@ private void runBulkRequestInBatch( Iterable> actionRequests, BiConsumer onFailure, BiConsumer onCompletion, - IntConsumer onDropped, - BulkRequest originalBulkRequest + IntConsumer onDropped ) { final Thread originalThread = Thread.currentThread(); @@ -635,7 +632,7 @@ private void runBulkRequestInBatch( i++; } - int batchSize = Math.min(numberOfActionRequests, originalBulkRequest.batchSize()); + int batchSize = numberOfActionRequests; List> batches = prepareBatches(batchSize, indexRequestWrappers); logger.debug("batchSize: {}, batches: {}", batchSize, batches.size()); diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestBulkAction.java index a3ed7f32127b5..b8c46bb68201e 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestBulkAction.java @@ -36,7 +36,6 @@ import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkShardRequest; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -67,8 +66,6 @@ public class RestBulkAction extends BaseRestHandler { private final boolean allowExplicitIndex; - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestBulkAction.class); - static final String BATCH_SIZE_DEPRECATED_MESSAGE = "The batch size option in bulk API is deprecated and will be removed in 3.0."; public RestBulkAction(Settings settings) { this.allowExplicitIndex = MULTI_ALLOW_EXPLICIT_INDEX.get(settings); @@ -100,10 +97,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC Boolean defaultRequireAlias = request.paramAsBoolean(DocWriteRequest.REQUIRE_ALIAS, null); bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT)); bulkRequest.setRefreshPolicy(request.param("refresh")); - if (request.hasParam("batch_size")) { - deprecationLogger.deprecate("batch_size_deprecation", BATCH_SIZE_DEPRECATED_MESSAGE); - } - 
bulkRequest.batchSize(request.paramAsInt("batch_size", Integer.MAX_VALUE)); bulkRequest.add( request.requiredContent(), defaultIndex, diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index a94a5d60b3f5a..847140e81e201 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -346,8 +346,7 @@ public void testIngestLocal() throws Exception { failureHandler.capture(), completionHandler.capture(), any(), - eq(Names.WRITE), - eq(bulkRequest) + eq(Names.WRITE) ); completionHandler.getValue().accept(null, exception); assertTrue(failureCalled.get()); @@ -384,8 +383,7 @@ public void testSingleItemBulkActionIngestLocal() throws Exception { failureHandler.capture(), completionHandler.capture(), any(), - eq(Names.WRITE), - any() + eq(Names.WRITE) ); completionHandler.getValue().accept(null, exception); assertTrue(failureCalled.get()); @@ -431,8 +429,7 @@ public void testIngestSystemLocal() throws Exception { failureHandler.capture(), completionHandler.capture(), any(), - eq(Names.SYSTEM_WRITE), - eq(bulkRequest) + eq(Names.SYSTEM_WRITE) ); completionHandler.getValue().accept(null, exception); assertTrue(failureCalled.get()); @@ -463,7 +460,7 @@ public void testIngestForward() throws Exception { action.execute(null, bulkRequest, listener); // should not have executed ingest locally - verify(ingestService, never()).executeBulkRequest(anyInt(), any(), any(), any(), any(), any(), any()); + verify(ingestService, never()).executeBulkRequest(anyInt(), any(), any(), any(), any(), any()); // but instead should have sent to a remote node with the transport service ArgumentCaptor node = ArgumentCaptor.forClass(DiscoveryNode.class); verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture()); @@ -503,7 +500,7 @@ public void testSingleItemBulkActionIngestForward() throws Exception { singleItemBulkWriteAction.execute(null, indexRequest, listener); // should not have executed ingest locally - verify(ingestService, never()).executeBulkRequest(anyInt(), any(), any(), any(), any(), any(), any()); + verify(ingestService, never()).executeBulkRequest(anyInt(), any(), any(), any(), any(), any()); // but instead should have sent to a remote node with the transport service ArgumentCaptor node = ArgumentCaptor.forClass(DiscoveryNode.class); verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture()); @@ -589,8 +586,7 @@ private void validatePipelineWithBulkUpsert(@Nullable String indexRequestIndexNa failureHandler.capture(), completionHandler.capture(), any(), - eq(Names.WRITE), - eq(bulkRequest) + eq(Names.WRITE) ); assertEquals(indexRequest1.getPipeline(), "default_pipeline"); assertEquals(indexRequest2.getPipeline(), "default_pipeline"); @@ -633,8 +629,7 @@ public void testDoExecuteCalledTwiceCorrectly() throws Exception { failureHandler.capture(), completionHandler.capture(), any(), - eq(Names.WRITE), - any() + eq(Names.WRITE) ); completionHandler.getValue().accept(null, exception); assertFalse(action.indexCreated); // still no index yet, the ingest node failed. 
@@ -721,8 +716,7 @@ public void testFindDefaultPipelineFromTemplateMatch() { failureHandler.capture(), completionHandler.capture(), any(), - eq(Names.WRITE), - any() + eq(Names.WRITE) ); } @@ -761,8 +755,7 @@ public void testFindDefaultPipelineFromV2TemplateMatch() { failureHandler.capture(), completionHandler.capture(), any(), - eq(Names.WRITE), - any() + eq(Names.WRITE) ); } @@ -787,8 +780,7 @@ private void validateDefaultPipeline(IndexRequest indexRequest) { failureHandler.capture(), completionHandler.capture(), any(), - eq(Names.WRITE), - any() + eq(Names.WRITE) ); assertEquals(indexRequest.getPipeline(), "default_pipeline"); completionHandler.getValue().accept(null, exception); diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java index 6fd21ddd24121..ed2a92ec48573 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java @@ -122,7 +122,6 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -148,7 +147,6 @@ public void setup() { when(threadPool.generic()).thenReturn(executorService); when(threadPool.executor(anyString())).thenReturn(executorService); mockBulkRequest = mock(BulkRequest.class); - lenient().when(mockBulkRequest.batchSize()).thenReturn(1); } public void testIngestPlugin() { @@ -228,8 +226,7 @@ public void testExecuteIndexPipelineDoesNotExist() { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - new BulkRequest() + Names.WRITE ); assertTrue(failure.get()); @@ -931,8 +928,7 @@ public String getType() { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - bulkRequest + Names.WRITE ); assertTrue(failure.get()); @@ -978,8 +974,7 @@ public void testExecuteBulkPipelineDoesNotExist() { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - bulkRequest + Names.WRITE ); verify(failureHandler, times(1)).accept( argThat((Integer item) -> item == 2), @@ -1015,8 +1010,7 @@ public void testExecuteSuccess() { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); verify(failureHandler, never()).accept(any(), any()); verify(completionHandler, times(1)).accept(Thread.currentThread(), null); @@ -1047,8 +1041,7 @@ public void testExecuteEmptyPipeline() throws Exception { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); verify(failureHandler, never()).accept(any(), any()); verify(completionHandler, times(1)).accept(Thread.currentThread(), null); @@ -1107,8 +1100,7 @@ public void testExecutePropagateAllMetadataUpdates() throws Exception { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); verify(processor).execute(any(), any()); verify(failureHandler, never()).accept(any(), any()); @@ -1152,8 +1144,7 @@ public void testExecuteFailure() throws Exception { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap()), any()); verify(failureHandler, times(1)).accept(eq(0), any(RuntimeException.class)); @@ -1211,8 +1202,7 @@ public void 
testExecuteSuccessWithOnFailure() throws Exception { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); verify(failureHandler, never()).accept(eq(0), any(IngestProcessorException.class)); verify(completionHandler, times(1)).accept(Thread.currentThread(), null); @@ -1261,8 +1251,7 @@ public void testExecuteFailureWithNestedOnFailure() throws Exception { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap()), any()); verify(failureHandler, times(1)).accept(eq(0), any(RuntimeException.class)); @@ -1322,8 +1311,7 @@ public void testBulkRequestExecutionWithFailures() { errorHandler::put, completionHandler::put, indexReq -> {}, - Names.WRITE, - bulkRequest + Names.WRITE ); MatcherAssert.assertThat(errorHandler.entrySet(), hasSize(numIndexRequests)); @@ -1383,8 +1371,7 @@ public void testBulkRequestExecution() throws Exception { requestItemErrorHandler, completionHandler, indexReq -> {}, - Names.WRITE, - bulkRequest + Names.WRITE ); verify(requestItemErrorHandler, never()).accept(any(), any()); @@ -1452,8 +1439,7 @@ public void testStats() throws Exception { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); final IngestStats afterFirstRequestStats = ingestService.stats(); assertThat(afterFirstRequestStats.getPipelineStats().size(), equalTo(2)); @@ -1477,8 +1463,7 @@ public void testStats() throws Exception { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); final IngestStats afterSecondRequestStats = ingestService.stats(); assertThat(afterSecondRequestStats.getPipelineStats().size(), equalTo(2)); @@ -1507,8 +1492,7 @@ public void testStats() throws Exception { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); final IngestStats afterThirdRequestStats = ingestService.stats(); assertThat(afterThirdRequestStats.getPipelineStats().size(), equalTo(2)); @@ -1541,8 +1525,7 @@ public void testStats() throws Exception { failureHandler, completionHandler, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); final IngestStats afterForthRequestStats = ingestService.stats(); assertThat(afterForthRequestStats.getPipelineStats().size(), equalTo(2)); @@ -1640,8 +1623,7 @@ public String getDescription() { failureHandler, completionHandler, dropHandler, - Names.WRITE, - bulkRequest + Names.WRITE ); verify(failureHandler, never()).accept(any(), any()); verify(completionHandler, times(1)).accept(Thread.currentThread(), null); @@ -1732,8 +1714,7 @@ public void testCBORParsing() throws Exception { (integer, e) -> {}, (thread, e) -> {}, indexReq -> {}, - Names.WRITE, - mockBulkRequest + Names.WRITE ); } @@ -1876,77 +1857,6 @@ public void testResolveRequestOrDefaultPipelineAndFinalPipeline() { } } - public void testExecuteBulkRequestInBatch() { - CompoundProcessor mockCompoundProcessor = mockCompoundProcessor(); - IngestService ingestService = createWithProcessors( - Collections.singletonMap("mock", (factories, tag, description, config) -> mockCompoundProcessor) - ); - createPipeline("_id", ingestService); - BulkRequest bulkRequest = new BulkRequest(); - IndexRequest indexRequest1 = new IndexRequest("_index").id("_id1").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none"); - bulkRequest.add(indexRequest1); - IndexRequest indexRequest2 = new 
IndexRequest("_index").id("_id2").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none"); - bulkRequest.add(indexRequest2); - IndexRequest indexRequest3 = new IndexRequest("_index").id("_id3").source(emptyMap()).setPipeline("_none").setFinalPipeline("_id"); - bulkRequest.add(indexRequest3); - IndexRequest indexRequest4 = new IndexRequest("_index").id("_id4").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none"); - bulkRequest.add(indexRequest4); - bulkRequest.batchSize(2); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final BiConsumer completionHandler = mock(BiConsumer.class); - ingestService.executeBulkRequest( - 4, - bulkRequest.requests(), - failureHandler, - completionHandler, - indexReq -> {}, - Names.WRITE, - bulkRequest - ); - verify(failureHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(Thread.currentThread(), null); - verify(mockCompoundProcessor, times(2)).batchExecute(any(), any()); - verify(mockCompoundProcessor, never()).execute(any(), any()); - } - - public void testExecuteBulkRequestInBatchWithDefaultAndFinalPipeline() { - CompoundProcessor mockCompoundProcessor = mockCompoundProcessor(); - IngestService ingestService = createWithProcessors( - Collections.singletonMap("mock", (factories, tag, description, config) -> mockCompoundProcessor) - ); - ClusterState clusterState = createPipeline("_id", ingestService); - createPipeline("_final", ingestService, clusterState); - BulkRequest bulkRequest = new BulkRequest(); - IndexRequest indexRequest1 = new IndexRequest("_index").id("_id1").source(emptyMap()).setPipeline("_id").setFinalPipeline("_final"); - bulkRequest.add(indexRequest1); - IndexRequest indexRequest2 = new IndexRequest("_index").id("_id2").source(emptyMap()).setPipeline("_id").setFinalPipeline("_final"); - bulkRequest.add(indexRequest2); - IndexRequest indexRequest3 = new IndexRequest("_index").id("_id3").source(emptyMap()).setPipeline("_id").setFinalPipeline("_final"); - bulkRequest.add(indexRequest3); - IndexRequest indexRequest4 = new IndexRequest("_index").id("_id4").source(emptyMap()).setPipeline("_id").setFinalPipeline("_final"); - bulkRequest.add(indexRequest4); - bulkRequest.batchSize(2); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final BiConsumer completionHandler = mock(BiConsumer.class); - ingestService.executeBulkRequest( - 4, - bulkRequest.requests(), - failureHandler, - completionHandler, - indexReq -> {}, - Names.WRITE, - bulkRequest - ); - verify(failureHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(Thread.currentThread(), null); - verify(mockCompoundProcessor, times(4)).batchExecute(any(), any()); - verify(mockCompoundProcessor, never()).execute(any(), any()); - } - public void testExecuteBulkRequestInBatchFallbackWithOneDocument() { CompoundProcessor mockCompoundProcessor = mockCompoundProcessor(); IngestService ingestService = createWithProcessors( @@ -1956,20 +1866,11 @@ public void testExecuteBulkRequestInBatchFallbackWithOneDocument() { BulkRequest bulkRequest = new BulkRequest(); IndexRequest indexRequest1 = new IndexRequest("_index").id("_id1").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none"); bulkRequest.add(indexRequest1); - bulkRequest.batchSize(2); @SuppressWarnings("unchecked") final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") 
final BiConsumer completionHandler = mock(BiConsumer.class); - ingestService.executeBulkRequest( - 1, - bulkRequest.requests(), - failureHandler, - completionHandler, - indexReq -> {}, - Names.WRITE, - bulkRequest - ); + ingestService.executeBulkRequest(1, bulkRequest.requests(), failureHandler, completionHandler, indexReq -> {}, Names.WRITE); verify(failureHandler, never()).accept(any(), any()); verify(completionHandler, times(1)).accept(Thread.currentThread(), null); verify(mockCompoundProcessor, never()).batchExecute(any(), any()); @@ -1994,20 +1895,11 @@ public void testExecuteBulkRequestInBatchNoValidPipeline() { .setPipeline("_none") .setFinalPipeline("_none"); bulkRequest.add(indexRequest2); - bulkRequest.batchSize(2); @SuppressWarnings("unchecked") final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") final BiConsumer completionHandler = mock(BiConsumer.class); - ingestService.executeBulkRequest( - 2, - bulkRequest.requests(), - failureHandler, - completionHandler, - indexReq -> {}, - Names.WRITE, - bulkRequest - ); + ingestService.executeBulkRequest(2, bulkRequest.requests(), failureHandler, completionHandler, indexReq -> {}, Names.WRITE); verify(failureHandler, never()).accept(any(), any()); verify(completionHandler, times(1)).accept(Thread.currentThread(), null); verify(mockCompoundProcessor, never()).batchExecute(any(), any()); @@ -2024,20 +1916,11 @@ public void testExecuteBulkRequestInBatchNoValidDocument() { // will not be handled as not valid document type bulkRequest.add(new DeleteRequest("_index", "_id")); bulkRequest.add(new DeleteRequest("_index", "_id")); - bulkRequest.batchSize(2); @SuppressWarnings("unchecked") final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") final BiConsumer completionHandler = mock(BiConsumer.class); - ingestService.executeBulkRequest( - 2, - bulkRequest.requests(), - failureHandler, - completionHandler, - indexReq -> {}, - Names.WRITE, - bulkRequest - ); + ingestService.executeBulkRequest(2, bulkRequest.requests(), failureHandler, completionHandler, indexReq -> {}, Names.WRITE); verify(failureHandler, never()).accept(any(), any()); verify(completionHandler, times(1)).accept(Thread.currentThread(), null); verify(mockCompoundProcessor, never()).batchExecute(any(), any()); @@ -2057,20 +1940,11 @@ public void testExecuteBulkRequestInBatchWithException() { bulkRequest.add(indexRequest1); IndexRequest indexRequest2 = new IndexRequest("_index").id("_id2").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none"); bulkRequest.add(indexRequest2); - bulkRequest.batchSize(2); @SuppressWarnings("unchecked") final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") final BiConsumer completionHandler = mock(BiConsumer.class); - ingestService.executeBulkRequest( - 2, - bulkRequest.requests(), - failureHandler, - completionHandler, - indexReq -> {}, - Names.WRITE, - bulkRequest - ); + ingestService.executeBulkRequest(2, bulkRequest.requests(), failureHandler, completionHandler, indexReq -> {}, Names.WRITE); verify(failureHandler, times(2)).accept(any(), any()); verify(completionHandler, times(1)).accept(Thread.currentThread(), null); verify(mockCompoundProcessor, times(1)).batchExecute(any(), any()); @@ -2091,7 +1965,6 @@ public void testExecuteBulkRequestInBatchWithExceptionAndDropInCallback() { bulkRequest.add(indexRequest2); IndexRequest indexRequest3 = new 
IndexRequest("_index").id("_id3").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none"); bulkRequest.add(indexRequest3); - bulkRequest.batchSize(3); List results = Arrays.asList( new IngestDocumentWrapper(0, IngestService.toIngestDocument(indexRequest1), null), @@ -2114,8 +1987,7 @@ public void testExecuteBulkRequestInBatchWithExceptionAndDropInCallback() { failureHandler::put, completionHandler::put, dropHandler::add, - Names.WRITE, - bulkRequest + Names.WRITE ); assertEquals(Set.of(1), failureHandler.keySet()); assertEquals(List.of(2), dropHandler); @@ -2149,8 +2021,7 @@ public void testExecuteBulkRequestInBatchWithDefaultBatchSize() { failureHandler::put, completionHandler::put, dropHandler::add, - Names.WRITE, - bulkRequest + Names.WRITE ); assertTrue(failureHandler.isEmpty()); assertTrue(dropHandler.isEmpty()); @@ -2180,7 +2051,6 @@ public void testExecuteEmptyPipelineInBatch() throws Exception { bulkRequest.add(indexRequest3); IndexRequest indexRequest4 = new IndexRequest("_index").id("_id4").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none"); bulkRequest.add(indexRequest4); - bulkRequest.batchSize(4); final Map failureHandler = new HashMap<>(); final Map completionHandler = new HashMap<>(); ingestService.executeBulkRequest( @@ -2189,8 +2059,7 @@ public void testExecuteEmptyPipelineInBatch() throws Exception { failureHandler::put, completionHandler::put, indexReq -> {}, - Names.WRITE, - bulkRequest + Names.WRITE ); assertTrue(failureHandler.isEmpty()); assertEquals(Set.of(Thread.currentThread()), completionHandler.keySet()); From 9549df8679e1509ec30f456c34b4ac00f22b096d Mon Sep 17 00:00:00 2001 From: Finn Date: Tue, 8 Apr 2025 19:43:43 -0700 Subject: [PATCH 172/550] Refactor FeatureFlags (#17611) * Refactor FeatureFlags.java. - Remove internal immutable `settings` in favor of `ConcurrentHashMap`. - Move functionality to internal `FeatureFlagsImpl` class. Expose public api of `FeatureFlagsImpl` in FeatureFlags. Expose test api of `FeatureFlagsImpl` in FeatureFlags.TestUtils. - Read and set JVM system properties once on `initializeFeatureFlags`. Remove JVM system properties check from `isEnabled`. - Add `FlagLock` in `TestUtils` to maintain a lock for each feature flag. - Add helper functions to set & access feature flags in a thread safe way. `TestUtils.with(, () -> {})` to execute crtical sections. `New FlagLock()` for fine grained control. Signed-off-by: Finn Carroll * Add @LockFeatureFlag annotation - Add annotation in OpenSearchTestCase to enable and lock a flag for the duration of a single test case. Signed-off-by: Finn Carroll * Update FeatureFlagTests - Add cases for public api. - Add cases for thread safe helpers @LockFeatureFlag FlagLock TestUtils.with Signed-off-by: Finn Carroll * Remove FeatureFlagSetter Signed-off-by: Finn Carroll * Find and replace feature flag test case usages with appropriate helper Replace all usages of `FeatureFlagSetter` in tests. Replace all usages of JVM system properties for feature flags in tests. Replace all usages of `initializeFeatureFlags` with `TestUtils.set` in tests. Signed-off-by: Finn Carroll * Add changelog entry Signed-off-by: Finn Carroll * Fix lingering test failures - Add missing LockFeatureFlag annotations. - Cannot use annotation in tests which expect exception thrown. - SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY has no setting? Adding. - Flight server tests need flag enabled on setup. 
Signed-off-by: Finn Carroll * Remove concurrency primitives JUnit does not run tests in parallel on the same JVM so these are not necessary. Additionally rename to `FlagWriteLock` for clarity. Signed-off-by: Finn Carroll * Fix flight service IT. Address ff contant nit. Signed-off-by: Finn Carroll * Nit. Signed-off-by: Finn Carroll * Add blurb to DEVELOPER_GUIDE.md regarding ff test utils. Signed-off-by: Finn Carroll --------- Signed-off-by: Finn Carroll --- CHANGELOG.md | 1 + DEVELOPER_GUIDE.md | 3 +- .../mapper/ScaledFloatFieldMapperTests.java | 14 +- .../arrow/flight/ArrowFlightServerIT.java | 11 +- .../arrow/flight/FlightStreamPluginTests.java | 10 +- .../bootstrap/FlightClientManagerTests.java | 6 +- .../flight/bootstrap/FlightServiceTests.java | 6 +- .../opensearch/common/util/FeatureFlags.java | 303 ++++++++++++++---- .../shrink/TransportResizeActionTests.java | 4 +- .../SystemTemplatesServiceTests.java | 7 +- .../coordination/JoinTaskExecutorTests.java | 13 +- .../MetadataCreateIndexServiceTests.java | 71 ++-- .../MetadataIndexTemplateServiceTests.java | 21 +- .../routing/OperationRoutingTests.java | 4 +- .../allocation/FailedShardsRoutingTests.java | 3 +- .../ShardsTieringAllocationTests.java | 14 +- .../common/settings/SettingsModuleTests.java | 7 +- .../common/util/FeatureFlagTests.java | 127 ++++++-- .../extensions/ExtensionsManagerTests.java | 6 +- .../RemoteClusterStateServiceTests.java | 5 +- .../opensearch/index/IndexSettingsTests.java | 5 +- .../AbstractStarTreeDVFormatTests.java | 7 +- .../index/engine/ReadOnlyEngineTests.java | 5 +- .../index/mapper/ObjectMapperTests.java | 31 +- .../index/mapper/StarTreeMapperTests.java | 6 +- .../opensearch/index/store/StoreTests.java | 5 +- .../java/org/opensearch/node/NodeTests.java | 5 +- .../opensearch/search/SearchModuleTests.java | 3 - .../search/SearchServiceStarTreeTests.java | 10 +- .../DateHistogramAggregatorTests.java | 7 +- .../startree/KeywordTermsAggregatorTests.java | 7 +- .../startree/MetricAggregatorTests.java | 7 +- .../startree/NumericTermsAggregatorTests.java | 7 +- .../startree/StarTreeFilterTests.java | 7 +- .../opensearch/test/FeatureFlagSetter.java | 66 ---- .../opensearch/test/OpenSearchTestCase.java | 56 +++- 36 files changed, 530 insertions(+), 340 deletions(-) delete mode 100644 test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 74de495f327f4..73bac28902e21 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Fix bytes parameter on `_cat/recovery` ([#17598](https://github.com/opensearch-project/OpenSearch/pull/17598)) +- Fix slow performance of FeatureFlag checks ([#17611](https://github.com/opensearch-project/OpenSearch/pull/17611)) ### Security diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index e7ad1d8120ea6..ca67c62b6a4b3 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -594,7 +594,8 @@ Rapidly developing new features often benefit from several release cycles before uses an Experimental Development process leveraging [Feature Flags](https://featureflags.io/feature-flags/). This allows a feature to be developed using the same process as a LTS feature but with additional guard rails and communication mechanisms to signal to the users and development community the feature is not yet stable, may change in a future release, or be removed altogether. 
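For illustration only (not part of the diff above): a minimal sketch of the three test-side patterns this change introduces. The test class, method names, and flag choices are hypothetical placeholders; the utilities themselves (@LockFeatureFlag, FeatureFlags.TestUtils.with, and FeatureFlags.TestUtils.FlagWriteLock) are the ones added by this commit.

import org.opensearch.common.util.FeatureFlags;
import org.opensearch.test.OpenSearchTestCase;

import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS;
import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX;

public class ExampleFeatureFlagUsageTests extends OpenSearchTestCase {

    // 1. Annotation (assumed to be declared on OpenSearchTestCase by this commit):
    //    enables the flag for this single test case and restores the previous value afterwards.
    @LockFeatureFlag(ARROW_STREAMS)
    public void testWithAnnotation() {
        assertTrue(FeatureFlags.isEnabled(ARROW_STREAMS));
    }

    // 2. Lambda helper: runs the critical section with the flag set, then restores it.
    //    The overload taking a value pins the flag to an explicit state (here: disabled).
    public void testWithLambdaHelper() throws Exception {
        FeatureFlags.TestUtils.with(STAR_TREE_INDEX, () -> assertTrue(FeatureFlags.isEnabled(STAR_TREE_INDEX)));
        FeatureFlags.TestUtils.with(STAR_TREE_INDEX, false, () -> assertFalse(FeatureFlags.isEnabled(STAR_TREE_INDEX)));
    }

    // 3. Explicit write lock: useful in setUp()/tearDown() when a whole suite needs the flag,
    //    or when finer-grained control over the locked region is required.
    public void testWithExplicitLock() {
        try (FeatureFlags.TestUtils.FlagWriteLock ignored = new FeatureFlags.TestUtils.FlagWriteLock(ARROW_STREAMS)) {
            assertTrue(FeatureFlags.isEnabled(ARROW_STREAMS));
        }
    }
}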
Any Developer or User APIs implemented along with the experimental feature should be marked with `@ExperimentalApi` (or documented as -`@opensearch.experimental`) annotation to signal the implementation is not subject to LTS and does not follow backwards compatibility guidelines. +`@opensearch.experimental`) annotation to signal the implementation is not subject to LTS and does not follow backwards compatibility guidelines. When writing tests for +functionality gated behind a feature flag please refer to `FeatureFlags.TestUtils` and the `@LockFeatureFlag` annotation. #### API Compatibility Checks diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java index d1af54452bde9..7dcfe1848511e 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -36,15 +36,12 @@ import org.apache.lucene.index.IndexableField; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.opensearch.plugins.Plugin; -import org.junit.AfterClass; -import org.junit.BeforeClass; import java.io.IOException; import java.util.Arrays; @@ -98,16 +95,7 @@ public void testExistsQueryDocValuesDisabled() throws IOException { assertParseMinimalWarnings(); } - @BeforeClass - public static void createMapper() { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(STAR_TREE_INDEX, "true").build()); - } - - @AfterClass - public static void clearMapper() { - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); - } - + @LockFeatureFlag(STAR_TREE_INDEX) public void testScaledFloatWithStarTree() throws Exception { double scalingFactorField1 = randomDouble() * 100; diff --git a/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java b/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java index bcad335c7a917..54b47329dab7f 100644 --- a/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java +++ b/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java @@ -14,26 +14,20 @@ import org.opensearch.arrow.flight.bootstrap.FlightService; import org.opensearch.arrow.flight.bootstrap.FlightStreamPlugin; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.BeforeClass; import java.util.Collection; import java.util.Collections; import java.util.concurrent.TimeUnit; +import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS; + @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 5) public class ArrowFlightServerIT extends OpenSearchIntegTestCase { private FlightClientManager 
flightClientManager; - @BeforeClass - public static void setupFeatureFlags() { - FeatureFlagSetter.set(FeatureFlags.ARROW_STREAMS_SETTING.getKey()); - } - @Override protected Collection> nodePlugins() { return Collections.singleton(FlightStreamPlugin.class); @@ -48,6 +42,7 @@ public void setUp() throws Exception { flightClientManager = flightService.getFlightClientManager(); } + @LockFeatureFlag(ARROW_STREAMS) public void testArrowFlightEndpoint() throws Exception { for (DiscoveryNode node : getClusterState().nodes()) { try (FlightClient flightClient = flightClientManager.getFlightClient(node.getId()).get()) { diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java index 6f93d792f9db4..2573f0032f45b 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java @@ -19,9 +19,7 @@ import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.SecureTransportSettingsProvider; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.ThreadPool; @@ -31,19 +29,18 @@ import java.util.List; import java.util.function.Supplier; -import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS_SETTING; +import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS; import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class FlightStreamPluginTests extends OpenSearchTestCase { - private Settings settings; + private final Settings settings = Settings.EMPTY; private ClusterService clusterService; @Override public void setUp() throws Exception { super.setUp(); - settings = Settings.builder().put(ARROW_STREAMS_SETTING.getKey(), true).build(); clusterService = mock(ClusterService.class); ClusterState clusterState = mock(ClusterState.class); DiscoveryNodes nodes = mock(DiscoveryNodes.class); @@ -52,9 +49,8 @@ public void setUp() throws Exception { when(nodes.getLocalNodeId()).thenReturn("test-node"); } + @LockFeatureFlag(ARROW_STREAMS) public void testPluginEnabled() throws IOException { - FeatureFlags.initializeFeatureFlags(settings); - FeatureFlagSetter.set(ARROW_STREAMS_SETTING.getKey()); FlightStreamPlugin plugin = new FlightStreamPlugin(settings); Collection components = plugin.createComponents( null, diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java index acc32d6b32f4c..ce2f0df7f5f55 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java @@ -28,7 +28,6 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.common.transport.TransportAddress; -import 
org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.client.Client; @@ -55,6 +54,7 @@ import io.netty.util.NettyRuntime; import static org.opensearch.arrow.flight.bootstrap.FlightClientManager.LOCATION_TIMEOUT_MS; +import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; @@ -64,6 +64,7 @@ @SuppressWarnings("unchecked") public class FlightClientManagerTests extends OpenSearchTestCase { + private static FeatureFlags.TestUtils.FlagWriteLock ffLock = null; private static BufferAllocator allocator; private static EventLoopGroup elg; @@ -78,6 +79,7 @@ public class FlightClientManagerTests extends OpenSearchTestCase { @BeforeClass public static void setupClass() throws Exception { + ffLock = new FeatureFlags.TestUtils.FlagWriteLock(ARROW_STREAMS); ServerConfig.init(Settings.EMPTY); allocator = new RootAllocator(); elg = ServerConfig.createELG("test-grpc-worker-elg", NettyRuntime.availableProcessors() * 2); @@ -89,7 +91,6 @@ public void setUp() throws Exception { super.setUp(); locationUpdaterExecutor = Executors.newScheduledThreadPool(1); - FeatureFlagSetter.set(FeatureFlags.ARROW_STREAMS_SETTING.getKey()); clusterService = mock(ClusterService.class); client = mock(Client.class); state = getDefaultState(); @@ -176,6 +177,7 @@ private DiscoveryNode createNode(String nodeId, String host, int port) throws Ex @AfterClass public static void tearClass() { allocator.close(); + ffLock.close(); } public void testGetFlightClientForExistingNode() { diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java index fa20535384557..d8f5d5ba6b45b 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java @@ -19,7 +19,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.transport.TransportAddress; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.client.Client; @@ -32,10 +31,12 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicInteger; +import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class FlightServiceTests extends OpenSearchTestCase { + FeatureFlags.TestUtils.FlagWriteLock ffLock = null; private Settings settings; private ClusterService clusterService; @@ -47,7 +48,7 @@ public class FlightServiceTests extends OpenSearchTestCase { @Override public void setUp() throws Exception { super.setUp(); - FeatureFlagSetter.set(FeatureFlags.ARROW_STREAMS_SETTING.getKey()); + ffLock = new FeatureFlags.TestUtils.FlagWriteLock(ARROW_STREAMS); int availablePort = getBasePort(9500) + port.addAndGet(1); settings = Settings.EMPTY; localNode = createNode(availablePort); @@ -147,6 +148,7 @@ public void testLifecycleStateTransitions() throws Exception { @Override public void tearDown() throws Exception { 
super.tearDown(); + ffLock.close(); } private DiscoveryNode createNode(int port) throws Exception { diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 4ff81cf0c1c96..8fa914438c1c4 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -12,43 +12,57 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import java.util.List; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; /** - * Utility class to manage feature flags. Feature flags are system properties that must be set on the JVM. - * These are used to gate the visibility/availability of incomplete features. For more information, see + * Feature flags are used to gate the visibility/availability of incomplete features. For more information, see * https://featureflags.io/feature-flag-introduction/ - * + * Due to their use case, feature flag settings have several additional properties enforced by convention and code: + * - Feature flags are boolean settings. + * - Feature flags are static settings. + * - Feature flags are globally available. + * - Feature flags are configurable by JVM system properties with setting key. * @opensearch.internal */ public class FeatureFlags { + // Prefixes public for testing + private static final String OS_EXPERIMENTAL_PREFIX = "opensearch.experimental."; + static final String FEATURE_FLAG_PREFIX = OS_EXPERIMENTAL_PREFIX + "feature."; + /** * Gates the visibility of the remote store to docrep migration. */ - public static final String REMOTE_STORE_MIGRATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.migration.enabled"; + public static final String REMOTE_STORE_MIGRATION_EXPERIMENTAL = FEATURE_FLAG_PREFIX + "remote_store.migration.enabled"; /** * Gates the ability for Searchable Snapshots to read snapshots that are older than the * guaranteed backward compatibility for OpenSearch (one prior major version) on a best effort basis. */ - public static final String SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY = - "opensearch.experimental.feature.searchable_snapshot.extended_compatibility.enabled"; + public static final String SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY = FEATURE_FLAG_PREFIX + + "searchable_snapshot.extended_compatibility.enabled"; + public static final Setting SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_SETTING = Setting.boolSetting( + SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY, + false, + Property.NodeScope + ); /** * Gates the functionality of extensions. * Once the feature is ready for production release, this feature flag can be removed. */ - public static final String EXTENSIONS = "opensearch.experimental.feature.extensions.enabled"; + public static final String EXTENSIONS = FEATURE_FLAG_PREFIX + "extensions.enabled"; /** * Gates the functionality of telemetry framework. */ - public static final String TELEMETRY = "opensearch.experimental.feature.telemetry.enabled"; + public static final String TELEMETRY = FEATURE_FLAG_PREFIX + "telemetry.enabled"; /** * Gates the optimization of datetime formatters caching along with change in default datetime formatter. 
*/ - public static final String DATETIME_FORMATTER_CACHING = "opensearch.experimental.optimization.datetime_formatter_caching.enabled"; + public static final String DATETIME_FORMATTER_CACHING = OS_EXPERIMENTAL_PREFIX + "optimization.datetime_formatter_caching.enabled"; /** * Gates the functionality of warm index having the capability to store data remotely. @@ -59,9 +73,9 @@ public class FeatureFlags { /** * Gates the functionality of background task execution. */ - public static final String BACKGROUND_TASK_EXECUTION_EXPERIMENTAL = "opensearch.experimental.feature.task.background.enabled"; + public static final String BACKGROUND_TASK_EXECUTION_EXPERIMENTAL = FEATURE_FLAG_PREFIX + "task.background.enabled"; - public static final String READER_WRITER_SPLIT_EXPERIMENTAL = "opensearch.experimental.feature.read.write.split.enabled"; + public static final String READER_WRITER_SPLIT_EXPERIMENTAL = FEATURE_FLAG_PREFIX + "read.write.split.enabled"; public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( REMOTE_STORE_MIGRATION_EXPERIMENTAL, @@ -95,13 +109,13 @@ public class FeatureFlags { * Gates the functionality of star tree index, which improves the performance of search * aggregations. */ - public static final String STAR_TREE_INDEX = "opensearch.experimental.feature.composite_index.star_tree.enabled"; + public static final String STAR_TREE_INDEX = FEATURE_FLAG_PREFIX + "composite_index.star_tree.enabled"; public static final Setting STAR_TREE_INDEX_SETTING = Setting.boolSetting(STAR_TREE_INDEX, false, Property.NodeScope); /** * Gates the functionality of application based configuration templates. */ - public static final String APPLICATION_BASED_CONFIGURATION_TEMPLATES = "opensearch.experimental.feature.application_templates.enabled"; + public static final String APPLICATION_BASED_CONFIGURATION_TEMPLATES = FEATURE_FLAG_PREFIX + "application_templates.enabled"; public static final Setting APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING = Setting.boolSetting( APPLICATION_BASED_CONFIGURATION_TEMPLATES, false, @@ -111,86 +125,243 @@ public class FeatureFlags { /** * Gates the functionality of ApproximatePointRangeQuery where we approximate query results. 
*/ - public static final String APPROXIMATE_POINT_RANGE_QUERY = "opensearch.experimental.feature.approximate_point_range_query.enabled"; + public static final String APPROXIMATE_POINT_RANGE_QUERY = FEATURE_FLAG_PREFIX + "approximate_point_range_query.enabled"; public static final Setting APPROXIMATE_POINT_RANGE_QUERY_SETTING = Setting.boolSetting( APPROXIMATE_POINT_RANGE_QUERY, false, Property.NodeScope ); - public static final String TERM_VERSION_PRECOMMIT_ENABLE = "opensearch.experimental.optimization.termversion.precommit.enabled"; + public static final String TERM_VERSION_PRECOMMIT_ENABLE = OS_EXPERIMENTAL_PREFIX + "optimization.termversion.precommit.enabled"; public static final Setting TERM_VERSION_PRECOMMIT_ENABLE_SETTING = Setting.boolSetting( TERM_VERSION_PRECOMMIT_ENABLE, false, Property.NodeScope ); - public static final String ARROW_STREAMS = "opensearch.experimental.feature.arrow.streams.enabled"; + public static final String ARROW_STREAMS = FEATURE_FLAG_PREFIX + "arrow.streams.enabled"; public static final Setting ARROW_STREAMS_SETTING = Setting.boolSetting(ARROW_STREAMS, false, Property.NodeScope); - private static final List> ALL_FEATURE_FLAG_SETTINGS = List.of( - REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, - EXTENSIONS_SETTING, - TELEMETRY_SETTING, - DATETIME_FORMATTER_CACHING_SETTING, - WRITABLE_WARM_INDEX_SETTING, - STAR_TREE_INDEX_SETTING, - APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, - READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, - TERM_VERSION_PRECOMMIT_ENABLE_SETTING, - ARROW_STREAMS_SETTING - ); - /** - * Should store the settings from opensearch.yml. + * Underlying implementation for feature flags. + * All settable feature flags are tracked here in FeatureFlagsImpl.featureFlags. + * Contains all functionality across test and server use cases. 
*/ - private static Settings settings; + static class FeatureFlagsImpl { + // Add an evergreen test feature flag and hide it in private scope + private static final String TEST_FLAG = "test.flag.enabled"; + private static final Setting TEST_FLAG_SETTING = Setting.boolSetting(TEST_FLAG, false, Property.NodeScope); + + private final ConcurrentHashMap, Boolean> featureFlags = new ConcurrentHashMap<>() { + { + put(TEST_FLAG_SETTING, TEST_FLAG_SETTING.get(Settings.EMPTY)); + put(REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING.getDefault(Settings.EMPTY)); + put(EXTENSIONS_SETTING, EXTENSIONS_SETTING.getDefault(Settings.EMPTY)); + put(TELEMETRY_SETTING, TELEMETRY_SETTING.getDefault(Settings.EMPTY)); + put(DATETIME_FORMATTER_CACHING_SETTING, DATETIME_FORMATTER_CACHING_SETTING.getDefault(Settings.EMPTY)); + put(WRITABLE_WARM_INDEX_SETTING, WRITABLE_WARM_INDEX_SETTING.getDefault(Settings.EMPTY)); + put(STAR_TREE_INDEX_SETTING, STAR_TREE_INDEX_SETTING.getDefault(Settings.EMPTY)); + put( + APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, + APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING.getDefault(Settings.EMPTY) + ); + put(READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.getDefault(Settings.EMPTY)); + put(TERM_VERSION_PRECOMMIT_ENABLE_SETTING, TERM_VERSION_PRECOMMIT_ENABLE_SETTING.getDefault(Settings.EMPTY)); + put(ARROW_STREAMS_SETTING, ARROW_STREAMS_SETTING.getDefault(Settings.EMPTY)); + put( + SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_SETTING, + SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_SETTING.getDefault(Settings.EMPTY) + ); + } + }; + + /** + * Initialize feature flags map from the following sources: + * (Each source overwrites previous feature flags) + * - Set from setting default + * - Set from JVM system property if flag exists + */ + FeatureFlagsImpl() { + initFromDefaults(); + initFromSysProperties(); + } + + /** + * Initialize feature flags map from the following sources: + * (Each source overwrites previous feature flags) + * - Set from setting default + * - Set from JVM system property if flag exists + * - Set from provided settings if flag exists + * @param openSearchSettings The settings stored in opensearch.yml. + */ + void initializeFeatureFlags(Settings openSearchSettings) { + initFromDefaults(); + initFromSysProperties(); + initFromSettings(openSearchSettings); + } + + /** + * Set all feature flags according to setting defaults. + * Overwrites existing entries in feature flags map. + * Skips flags which are write locked according to TestUtils.FlagLock. + */ + private void initFromDefaults() { + for (Setting ff : featureFlags.keySet()) { + if (TestUtils.FlagWriteLock.isLocked(ff.getKey())) continue; + featureFlags.put(ff, ff.getDefault(Settings.EMPTY)); + } + } + + /** + * Update feature flags according to JVM system properties. + * Feature flags are true if system property is set as "true" (case-insensitive). Else feature set to false. + * Overwrites existing value if system property exists. + * Skips flags which are write locked according to TestUtils.FlagLock. + */ + private void initFromSysProperties() { + for (Setting ff : featureFlags.keySet()) { + if (TestUtils.FlagWriteLock.isLocked(ff.getKey())) continue; + String prop = System.getProperty(ff.getKey()); + if (prop != null) { + featureFlags.put(ff, Boolean.valueOf(prop)); + } + } + } + + /** + * Update feature flags in ALL_FEATURE_FLAG_SETTINGS according to provided settings. + * Overwrites existing entries in feature flags map. 
+ * Skips flags which are write locked according to TestUtils.FlagLock. + * @param settings settings to update feature flags from + */ + private void initFromSettings(Settings settings) { + for (Setting ff : featureFlags.keySet()) { + if (settings.hasValue(ff.getKey())) { + if (TestUtils.FlagWriteLock.isLocked(ff.getKey())) continue; + featureFlags.put(ff, settings.getAsBoolean(ff.getKey(), ff.getDefault(settings))); + } + } + } + + /** + * @param ff feature flag setting + * @return true if feature enabled - else false + */ + boolean isEnabled(Setting ff) { + return featureFlags.getOrDefault(ff, false); + } + + /** + * @param featureFlagName feature flag setting key + * @return true if feature enabled - else false + */ + boolean isEnabled(String featureFlagName) { + for (Setting ff : featureFlags.keySet()) { + if (ff.getKey().equals(featureFlagName)) return featureFlags.get(ff); + } + return false; + } - static { - Settings.Builder settingsBuilder = Settings.builder(); - for (Setting ffSetting : ALL_FEATURE_FLAG_SETTINGS) { - settingsBuilder = settingsBuilder.put(ffSetting.getKey(), ffSetting.getDefault(Settings.EMPTY)); + /** + * @param featureFlagName feature flag to set + * @param value value for flag + */ + void set(String featureFlagName, Boolean value) { + for (Setting ff : featureFlags.keySet()) { + if (ff.getKey().equals(featureFlagName)) featureFlags.put(ff, value); + } } - settings = settingsBuilder.build(); } + private static final FeatureFlagsImpl featureFlagsImpl = new FeatureFlagsImpl(); + /** - * This method is responsible to map settings from opensearch.yml to local stored - * settings value. That is used for the existing isEnabled method. - * - * @param openSearchSettings The settings stored in opensearch.yml. + * Server module public API. */ public static void initializeFeatureFlags(Settings openSearchSettings) { - Settings.Builder settingsBuilder = Settings.builder(); - for (Setting ffSetting : ALL_FEATURE_FLAG_SETTINGS) { - settingsBuilder = settingsBuilder.put( - ffSetting.getKey(), - openSearchSettings.getAsBoolean(ffSetting.getKey(), ffSetting.getDefault(openSearchSettings)) - ); - } - settings = settingsBuilder.build(); + featureFlagsImpl.initializeFeatureFlags(openSearchSettings); } - /** - * Used to test feature flags whose values are expected to be booleans. - * This method returns true if the value is "true" (case-insensitive), - * and false otherwise. - */ public static boolean isEnabled(String featureFlagName) { - if ("true".equalsIgnoreCase(System.getProperty(featureFlagName))) { - // TODO: Remove the if condition once FeatureFlags are only supported via opensearch.yml - return true; - } - return settings != null && settings.getAsBoolean(featureFlagName, false); + return featureFlagsImpl.isEnabled(featureFlagName); } public static boolean isEnabled(Setting featureFlag) { - if ("true".equalsIgnoreCase(System.getProperty(featureFlag.getKey()))) { - // TODO: Remove the if condition once FeatureFlags are only supported via opensearch.yml - return true; - } else if (settings != null) { - return featureFlag.get(settings); - } else { - return featureFlag.getDefault(Settings.EMPTY); + return featureFlagsImpl.isEnabled(featureFlag); + } + + /** + * Provides feature flag write access for test use cases. + * To enable a feature flag for a single test case see @LockFeatureFlag annotation. + * For more fine grain control us TestUtils.with() or explicitly construct a new FlagLock(). + * Note: JUnit will not run test cases concurrently within a suite by default. 
+ * Similarly test suites are forked and run in a separate JVM environment. + * As such these utility methods do not provide any thread safety. + */ + public static class TestUtils { + /** + * AutoCloseable helper which sets a feature flag and makes it immutable for the lifetime of the lock. + * Throws an exception if two locks exist for the same flag as we should never reach this state. + * Initializing two write locks for the same flag throws a RuntimeException. + */ + public static class FlagWriteLock implements AutoCloseable { + private static final Set writeLocks = new HashSet<>(); + private final String flag; + private final Boolean prev; + + public static boolean isLocked(String flag) { + return writeLocks.contains(flag); + } + + public FlagWriteLock(String flag) { + this(flag, true); + } + + public FlagWriteLock(String flag, Boolean value) { + if (writeLocks.contains(flag)) { + throw new RuntimeException("Cannot initialize second write lock for feature flag: " + flag); + } + this.flag = flag; + this.prev = featureFlagsImpl.isEnabled(flag); + writeLocks.add(flag); + featureFlagsImpl.set(flag, value); + } + + public void unlock() { + featureFlagsImpl.set(flag, prev); + writeLocks.remove(flag); + } + + @Override + public void close() { + featureFlagsImpl.set(flag, prev); + writeLocks.remove(flag); + } + } + + /** + * For critical sections run as lambdas which may throw exceptions. + */ + @FunctionalInterface + public interface ThrowingRunnable { + void run() throws Exception; + } + + /** + * Executes runnable test action with the provided feature flag enabled. + * Returns feature flag to previous value. + * @param flag feature flag setting. + * @param action critical section to run while feature flag is set. + */ + public static void with(String flag, ThrowingRunnable action) throws Exception { + try (FlagWriteLock ignored = new FlagWriteLock(flag)) { + action.run(); + } + } + + public static void with(String flag, Boolean value, ThrowingRunnable action) throws Exception { + try (FlagWriteLock ignored = new FlagWriteLock(flag, value)) { + action.run(); + } } } } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java index 5bab2ceca0988..18069ede796af 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java @@ -53,7 +53,6 @@ import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.shard.DocsStats; import org.opensearch.index.store.StoreStats; @@ -602,11 +601,10 @@ public void testIndexBlocks() { assertEquals(request.waitForActiveShards(), activeShardCount); } + @LockFeatureFlag(REMOTE_STORE_MIGRATION_EXPERIMENTAL) public void testResizeFailuresDuringMigration() { // We will keep all other settings correct for resize request, // So we only need to test for the failures due to cluster setting validation while migration - final Settings directionEnabledNodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(directionEnabledNodeSettings); boolean 
isRemoteStoreEnabled = randomBoolean(); CompatibilityMode compatibilityMode = randomFrom(CompatibilityMode.values()); RemoteStoreNodeService.Direction migrationDirection = randomFrom(RemoteStoreNodeService.Direction.values()); diff --git a/server/src/test/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesServiceTests.java b/server/src/test/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesServiceTests.java index affb017264fdf..6619f2587d188 100644 --- a/server/src/test/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesServiceTests.java @@ -11,7 +11,6 @@ import org.opensearch.cluster.service.applicationtemplates.TestSystemTemplatesRepositoryPlugin; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; @@ -23,12 +22,14 @@ import org.mockito.Mockito; import static org.opensearch.common.settings.ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; +import static org.opensearch.common.util.FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES; import static org.mockito.Mockito.when; public class SystemTemplatesServiceTests extends OpenSearchTestCase { private SystemTemplatesService systemTemplatesService; + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testSystemTemplatesLoaded() throws IOException { setupService(true); @@ -43,6 +44,7 @@ public void testSystemTemplatesLoaded() throws IOException { } } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testSystemTemplatesVerifyAndLoad() throws IOException { setupService(false); @@ -61,6 +63,7 @@ public void testSystemTemplatesVerifyAndLoad() throws IOException { assertEquals(stats.getFailedLoadingRepositories(), 0L); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testSystemTemplatesVerifyWithFailingRepository() throws IOException { setupService(true); @@ -77,8 +80,6 @@ public void testSystemTemplatesVerifyWithFailingRepository() throws IOException } private void setupService(boolean errorFromMockPlugin) throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); - ThreadPool mockPool = Mockito.mock(ThreadPool.class); when(mockPool.generic()).thenReturn(OpenSearchExecutors.newDirectExecutorService()); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 9b91e4d507d57..ab455a1fbb4e7 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -52,7 +52,6 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; @@ -958,6 +957,7 @@ public void testUpdatesClusterStateWithMultiNodeClusterAndSameRepository() throw 
validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); } + @LockFeatureFlag(REMOTE_STORE_MIGRATION_EXPERIMENTAL) public void testUpdatesRepoRemoteNodeJoinPublicationCluster() throws Exception { final AllocationService allocationService = mock(AllocationService.class); when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); @@ -1005,8 +1005,6 @@ public void testUpdatesRepoRemoteNodeJoinPublicationCluster() throws Exception { .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") .build(); - final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); Metadata metadata = Metadata.builder().persistentSettings(settings).build(); ClusterState currentState = ClusterState.builder(result.resultingState).metadata(metadata).build(); @@ -1029,6 +1027,7 @@ public void testUpdatesRepoRemoteNodeJoinPublicationCluster() throws Exception { validateRepositoriesMetadata(resultAfterRemoteNodeJoin.resultingState, remoteStoreNode, clusterManagerNode); } + @LockFeatureFlag(REMOTE_STORE_MIGRATION_EXPERIMENTAL) public void testUpdatesRepoPublicationNodeJoinRemoteCluster() throws Exception { final AllocationService allocationService = mock(AllocationService.class); when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); @@ -1071,8 +1070,6 @@ public void testUpdatesRepoPublicationNodeJoinRemoteCluster() throws Exception { .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") .build(); - final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); Metadata metadata = Metadata.builder().persistentSettings(settings).build(); ClusterState currentState = ClusterState.builder(result.resultingState).metadata(metadata).build(); @@ -1309,6 +1306,7 @@ public void testRemoteRoutingTableNodeJoinNodeWithRemoteAndRoutingRepoDifference JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); } + @LockFeatureFlag(REMOTE_STORE_MIGRATION_EXPERIMENTAL) public void testRemoteRoutingTableNodeJoinNodeWithRemoteAndRoutingRepoDifferenceMixedMode() { Map attr = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); attr.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO)); @@ -1332,8 +1330,6 @@ public void testRemoteRoutingTableNodeJoinNodeWithRemoteAndRoutingRepoDifference .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") .build(); - final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); Metadata metadata = Metadata.builder().persistentSettings(settings).build(); ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(existingNode2).add(existingNode).localNodeId(existingNode.getId()).build()) @@ -1344,6 +1340,7 @@ public void testRemoteRoutingTableNodeJoinNodeWithRemoteAndRoutingRepoDifference JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); } + 
@LockFeatureFlag(REMOTE_STORE_MIGRATION_EXPERIMENTAL) public void testJoinRemoteStoreClusterWithRemotePublicationNodeInMixedMode() { final DiscoveryNode remoteStoreNode = new DiscoveryNode( UUIDs.base64UUID(), @@ -1365,8 +1362,6 @@ public void testJoinRemoteStoreClusterWithRemotePublicationNodeInMixedMode() { .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") .build(); - final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); Metadata metadata = Metadata.builder().persistentSettings(settings).build(); ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(remoteStoreNode).add(nonRemoteStoreNode).localNodeId(remoteStoreNode.getId()).build()) diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index dfe3928ac37f3..03237ba81f05e 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -156,6 +156,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateIndexService.parseV1Mappings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases; import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; +import static org.opensearch.common.util.FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES; import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.index.IndexSettings.INDEX_MERGE_POLICY; @@ -247,8 +248,6 @@ public void setupCreateIndexRequestAndAliasValidator() { @After public void tearDown() throws Exception { super.tearDown(); - // clear any FeatureFlags needed for individual tests - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); clusterSettings = null; } @@ -1600,9 +1599,8 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { })); } + @LockFeatureFlag(REMOTE_STORE_MIGRATION_EXPERIMENTAL) public void testNewIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMixedMode() { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build()); - // non-remote cluster manager node DiscoveryNode nonRemoteClusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); @@ -2314,40 +2312,41 @@ public void testIndexCreationWithIndexStoreTypeRemoteStoreThrowsException() { public void testCreateIndexWithContextDisabled() throws Exception { // Explicitly disable the FF - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false).build()); - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); - withTemporaryClusterService((clusterService, threadPool) -> { - MetadataCreateIndexService checkerService = new MetadataCreateIndexService( - Settings.EMPTY, - clusterService, - indicesServices, - null, - null, - createTestShardLimitService(randomIntBetween(1, 1000), false, 
clusterService), - mock(Environment.class), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - threadPool, - null, - new SystemIndices(Collections.emptyMap()), - false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE, - repositoriesServiceSupplier - ); - CountDownLatch counter = new CountDownLatch(1); - InvalidIndexContextException exception = expectThrows( - InvalidIndexContextException.class, - () -> checkerService.validateContext(request) - ); - assertTrue( - "Invalid exception message." + exception.getMessage(), - exception.getMessage().contains("index specifies a context which cannot be used without enabling") - ); + FeatureFlags.TestUtils.with(APPLICATION_BASED_CONFIGURATION_TEMPLATES, false, () -> { + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); + withTemporaryClusterService((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + indicesServices, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + mock(Environment.class), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier + ); + CountDownLatch counter = new CountDownLatch(1); + InvalidIndexContextException exception = expectThrows( + InvalidIndexContextException.class, + () -> checkerService.validateContext(request) + ); + assertTrue( + "Invalid exception message." + exception.getMessage(), + exception.getMessage().contains("index specifies a context which cannot be used without enabling") + ); + }); }); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testCreateIndexWithContextAbsent() throws Exception { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); withTemporaryClusterService((clusterService, threadPool) -> { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( @@ -2379,8 +2378,8 @@ public void testCreateIndexWithContextAbsent() throws Exception { }); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testApplyContext() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); final Map mappings = new HashMap<>(); @@ -2476,8 +2475,8 @@ public void testApplyContext() throws IOException { }); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testApplyContextWithSettingsOverlap() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test").context(new Context(randomAlphaOfLength(5))); Settings.Builder settingsBuilder = Settings.builder().put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), "30s"); String templateContent = 
"{\n" diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 795d1713772c2..a91e724ec279d 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -47,7 +47,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentFactory; @@ -95,6 +94,7 @@ import static org.opensearch.cluster.applicationtemplates.SystemTemplateMetadata.fromComponentTemplateInfo; import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING; import static org.opensearch.common.settings.Settings.builder; +import static org.opensearch.common.util.FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES; import static org.opensearch.common.util.concurrent.ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME; import static org.opensearch.env.Environment.PATH_HOME_SETTING; import static org.opensearch.index.mapper.DataStreamFieldMapper.Defaults.TIMESTAMP_FIELD; @@ -769,8 +769,8 @@ public void onFailure(Exception e) { ); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testPutGlobalV2TemplateWhichProvidesContextNotPresentInState() throws Exception { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); ComposableIndexTemplate globalIndexTemplate = new ComposableIndexTemplate( List.of("*"), @@ -809,8 +809,8 @@ public void onFailure(Exception e) { ); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testPutGlobalV2TemplateWhichProvidesContextWithNonExistingVersion() throws Exception { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); Function templateApplier = codec -> new Template( @@ -893,8 +893,8 @@ public void onFailure(Exception e) { ); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testPutGlobalV2TemplateWhichProvidesContextInComposedOfSection() throws Exception { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); Function templateApplier = codec -> new Template( @@ -972,16 +972,18 @@ public void onFailure(Exception e) { ); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testPutGlobalV2TemplateWhichProvidesContextWithSpecificVersion() throws Exception { verifyTemplateCreationUsingContext("1"); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testPutGlobalV2TemplateWhichProvidesContextWithLatestVersion() throws Exception { verifyTemplateCreationUsingContext("_latest"); } + 
@LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testModifySystemTemplateViaUnknownSource() throws Exception { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); Function templateApplier = codec -> new Template( @@ -1014,6 +1016,7 @@ public void testModifySystemTemplateViaUnknownSource() throws Exception { ); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testResolveSettingsWithContextVersion() throws Exception { ClusterService clusterService = node().injector().getInstance(ClusterService.class); final String indexTemplateName = verifyTemplateCreationUsingContext("1"); @@ -1022,6 +1025,7 @@ public void testResolveSettingsWithContextVersion() throws Exception { assertThat(settings.get("index.codec"), equalTo(CodecService.BEST_COMPRESSION_CODEC)); } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) public void testResolveSettingsWithContextLatest() throws Exception { ClusterService clusterService = node().injector().getInstance(ClusterService.class); final String indexTemplateName = verifyTemplateCreationUsingContext(Context.LATEST_VERSION); @@ -2629,8 +2633,8 @@ public static void assertTemplatesEqual(ComposableIndexTemplate actual, Composab } } + @LockFeatureFlag(APPLICATION_BASED_CONFIGURATION_TEMPLATES) private String verifyTemplateCreationUsingContext(String contextVersion) throws Exception { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); Function templateApplier = codec -> new Template( @@ -2751,9 +2755,6 @@ protected boolean resetNodeAfterTest() { @Override protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false) - .build(); + return Settings.builder().put(super.featureFlagSettings()).put(APPLICATION_BASED_CONFIGURATION_TEMPLATES, false).build(); } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index 11890d561e8b7..e7d43de5d1d51 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -72,6 +72,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; +import static org.opensearch.common.util.FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -1058,9 +1059,9 @@ public void testSearchableSnapshotPrimaryDefault() throws Exception { } } + @LockFeatureFlag(WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG) @SuppressForbidden(reason = "feature flag overrides") public void testPartialIndexPrimaryDefault() throws Exception { - System.setProperty(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, "true"); final int numIndices = 1; final int numShards = 2; final int 
numReplicas = 2; @@ -1116,7 +1117,6 @@ public void testPartialIndexPrimaryDefault() throws Exception { } finally { IOUtils.close(clusterService); terminate(threadPool); - System.setProperty(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, "false"); } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java index f8e1c609e6ee8..d2b77a013a483 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -50,7 +50,6 @@ import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeService; @@ -825,8 +824,8 @@ private void testReplicaIsPromoted(boolean isSegmentReplicationEnabled) { } } + @LockFeatureFlag(REMOTE_STORE_MIGRATION_EXPERIMENTAL) public void testPreferReplicaOnRemoteNodeForPrimaryPromotion() { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build()); AllocationService allocation = createAllocationService(Settings.builder().build()); // segment replication enabled diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsTieringAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsTieringAllocationTests.java index 765d88f7af360..a1d5cb3932aa7 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsTieringAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsTieringAllocationTests.java @@ -15,23 +15,17 @@ import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingPool; import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; -import org.opensearch.test.FeatureFlagSetter; -import org.junit.Before; import static org.opensearch.cluster.routing.RoutingPool.LOCAL_ONLY; import static org.opensearch.cluster.routing.RoutingPool.REMOTE_CAPABLE; import static org.opensearch.cluster.routing.RoutingPool.getIndexPool; +import static org.opensearch.common.util.FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG; import static org.opensearch.index.IndexModule.INDEX_STORE_LOCALITY_SETTING; public class ShardsTieringAllocationTests extends TieringAllocationBaseTestCase { - @Before - public void setup() { - FeatureFlagSetter.set(FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG); - } - + @LockFeatureFlag(WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG) public void testShardsInLocalPool() { int localOnlyNodes = 5; int remoteCapableNodes = 3; @@ -52,6 +46,7 @@ public void testShardsInLocalPool() { } } + @LockFeatureFlag(WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG) public void testShardsInRemotePool() { int localOnlyNodes = 7; int remoteCapableNodes = 3; @@ -72,6 +67,7 @@ public void testShardsInRemotePool() { } } + @LockFeatureFlag(WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG) public void testShardsWithTiering() { int localOnlyNodes = 15; int remoteCapableNodes = 13; @@ -104,6 +100,7 @@ public void 
testShardsWithTiering() { } } + @LockFeatureFlag(WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG) public void testShardPoolForPartialIndices() { String index = "test-index"; IndexMetadata indexMetadata = IndexMetadata.builder(index) @@ -118,6 +115,7 @@ public void testShardPoolForPartialIndices() { assertEquals(REMOTE_CAPABLE, indexPool); } + @LockFeatureFlag(WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG) public void testShardPoolForFullIndices() { String index = "test-index"; IndexMetadata indexMetadata = IndexMetadata.builder(index) diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java index d504c3af90679..0be28f580d73c 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java @@ -34,15 +34,14 @@ import org.opensearch.common.inject.ModuleTestCase; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.search.SearchService; -import org.opensearch.test.FeatureFlagSetter; import org.hamcrest.Matchers; import java.util.Arrays; import static java.util.Collections.emptySet; +import static org.opensearch.common.util.FeatureFlags.EXTENSIONS; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; @@ -242,8 +241,8 @@ public void testOldMaxClauseCountSetting() { ); } + @LockFeatureFlag(EXTENSIONS) public void testDynamicNodeSettingsRegistration() { - FeatureFlagSetter.set(FeatureFlags.EXTENSIONS); Settings settings = Settings.builder().put("some.custom.setting", "2.0").build(); SettingsModule module = new SettingsModule(settings, Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); assertNotNull(module.getClusterSettings().get("some.custom.setting")); @@ -263,8 +262,8 @@ public void testDynamicNodeSettingsRegistration() { ); } + @LockFeatureFlag(EXTENSIONS) public void testDynamicIndexSettingsRegistration() { - FeatureFlagSetter.set(FeatureFlags.EXTENSIONS); Settings settings = Settings.builder().put("some.custom.setting", "2.0").build(); SettingsModule module = new SettingsModule(settings, Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); assertNotNull(module.getClusterSettings().get("some.custom.setting")); diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java index 6d9d1aad3c5d5..f3751e98f5b60 100644 --- a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java +++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java @@ -8,54 +8,121 @@ package org.opensearch.common.util; +import org.opensearch.common.SuppressForbidden; import org.opensearch.common.settings.Settings; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.common.util.FeatureFlags.DATETIME_FORMATTER_CACHING; -import static org.opensearch.common.util.FeatureFlags.EXTENSIONS; +import static org.opensearch.common.util.FeatureFlags.FEATURE_FLAG_PREFIX; public class FeatureFlagTests extends OpenSearchTestCase { + // Evergreen test flag + private static final String TEST_FLAG = "test.flag.enabled"; - private final String FLAG_PREFIX = "opensearch.experimental.feature."; + public void testFeatureFlagsNotInitialized() { + 
FeatureFlags.FeatureFlagsImpl testFlagsImpl = new FeatureFlags.FeatureFlagsImpl(); + assertFalse(testFlagsImpl.isEnabled(TEST_FLAG)); + } + + public void testFeatureFlagsFromDefault() { + FeatureFlags.FeatureFlagsImpl testFlagsImpl = new FeatureFlags.FeatureFlagsImpl(); + assertFalse(testFlagsImpl.isEnabled(TEST_FLAG)); + } + + public void testFeatureFlagFromEmpty() { + FeatureFlags.FeatureFlagsImpl testFlagsImpl = new FeatureFlags.FeatureFlagsImpl(); + testFlagsImpl.initializeFeatureFlags(Settings.EMPTY); + assertFalse(testFlagsImpl.isEnabled(TEST_FLAG)); + } + + public void testFeatureFlagFromSettings() { + FeatureFlags.FeatureFlagsImpl testFlagsImpl = new FeatureFlags.FeatureFlagsImpl(); + testFlagsImpl.initializeFeatureFlags(Settings.builder().put(TEST_FLAG, true).build()); + assertTrue(testFlagsImpl.isEnabled(TEST_FLAG)); + testFlagsImpl.initializeFeatureFlags(Settings.builder().put(TEST_FLAG, false).build()); + assertFalse(testFlagsImpl.isEnabled(TEST_FLAG)); + } + + @SuppressForbidden(reason = "Testing system property functionality") + private void setSystemPropertyTrue(String key) { + System.setProperty(key, "true"); + } - public void testFeatureFlagSet() { - final String testFlag = FLAG_PREFIX + "testFlag"; - FeatureFlagSetter.set(testFlag); - assertNotNull(System.getProperty(testFlag)); - assertTrue(FeatureFlags.isEnabled(testFlag)); + @SuppressForbidden(reason = "Testing system property functionality") + private String getSystemProperty(String key) { + return System.getProperty(key); } - public void testMissingFeatureFlag() { - final String testFlag = FLAG_PREFIX + "testFlag"; - assertNull(System.getProperty(testFlag)); - assertFalse(FeatureFlags.isEnabled(testFlag)); + @SuppressForbidden(reason = "Testing system property functionality") + private void clearSystemProperty(String key) { + System.clearProperty(key); } public void testNonBooleanFeatureFlag() { + FeatureFlags.FeatureFlagsImpl testFlagsImpl = new FeatureFlags.FeatureFlagsImpl(); String javaVersionProperty = "java.version"; - assertNotNull(System.getProperty(javaVersionProperty)); - assertFalse(FeatureFlags.isEnabled(javaVersionProperty)); + assertNotNull(getSystemProperty(javaVersionProperty)); + assertFalse(testFlagsImpl.isEnabled(javaVersionProperty)); + } + + public void testFeatureFlagFromSystemProperty() { + synchronized (TEST_FLAG) { // sync for sys property + setSystemPropertyTrue(TEST_FLAG); + FeatureFlags.FeatureFlagsImpl testFlagsImpl = new FeatureFlags.FeatureFlagsImpl(); + assertTrue(testFlagsImpl.isEnabled(TEST_FLAG)); + clearSystemProperty(TEST_FLAG); + } + } + + @SuppressForbidden(reason = "Testing with system property") + public void testFeatureFlagSettingOverwritesSystemProperties() { + FeatureFlags.FeatureFlagsImpl testFlagsImpl = new FeatureFlags.FeatureFlagsImpl(); + synchronized (TEST_FLAG) { // sync for sys property + setSystemPropertyTrue(TEST_FLAG); + testFlagsImpl.initializeFeatureFlags(Settings.EMPTY); + assertTrue(testFlagsImpl.isEnabled(TEST_FLAG)); + clearSystemProperty(TEST_FLAG); + } + testFlagsImpl.initializeFeatureFlags(Settings.builder().put(TEST_FLAG, false).build()); + assertFalse(testFlagsImpl.isEnabled(TEST_FLAG)); + } + + @SuppressForbidden(reason = "Testing with system property") + public void testFeatureDoesNotExist() { + final String DNE_FF = FEATURE_FLAG_PREFIX + "doesntexist"; + FeatureFlags.FeatureFlagsImpl testFlagsImpl = new FeatureFlags.FeatureFlagsImpl(); + assertFalse(testFlagsImpl.isEnabled(DNE_FF)); + setSystemPropertyTrue(DNE_FF); + 
testFlagsImpl.initializeFeatureFlags(Settings.EMPTY); + assertFalse(testFlagsImpl.isEnabled(DNE_FF)); + clearSystemProperty(DNE_FF); + testFlagsImpl.initializeFeatureFlags(Settings.builder().put(DNE_FF, true).build()); + assertFalse(testFlagsImpl.isEnabled(DNE_FF)); } - public void testBooleanFeatureFlagWithDefaultSetToFalse() { - final String testFlag = EXTENSIONS; - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); - assertNotNull(testFlag); - assertFalse(FeatureFlags.isEnabled(testFlag)); + /** + * Test global feature flag instance. + */ + + public void testLockFeatureFlagWithFlagLock() { + try (FeatureFlags.TestUtils.FlagWriteLock ignore = new FeatureFlags.TestUtils.FlagWriteLock(TEST_FLAG)) { + assertTrue(FeatureFlags.isEnabled(TEST_FLAG)); + FeatureFlags.initializeFeatureFlags(Settings.builder().put(TEST_FLAG, false).build()); + assertTrue(FeatureFlags.isEnabled(TEST_FLAG)); // flag is locked + } } - public void testBooleanFeatureFlagInitializedWithEmptySettingsAndDefaultSetToFalse() { - final String testFlag = DATETIME_FORMATTER_CACHING; - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); - assertFalse(FeatureFlags.isEnabled(testFlag)); + public void testLockFeatureFlagWithHelper() throws Exception { + FeatureFlags.TestUtils.with(TEST_FLAG, () -> { + assertTrue(FeatureFlags.isEnabled(TEST_FLAG)); + FeatureFlags.initializeFeatureFlags(Settings.builder().put(TEST_FLAG, false).build()); + assertTrue(FeatureFlags.isEnabled(TEST_FLAG)); // flag is locked + }); } - public void testInitializeFeatureFlagsWithExperimentalSettings() { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(EXTENSIONS, true).build()); - assertTrue(FeatureFlags.isEnabled(EXTENSIONS)); - assertFalse(FeatureFlags.isEnabled(DATETIME_FORMATTER_CACHING)); - // reset FeatureFlags to defaults - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + @LockFeatureFlag(TEST_FLAG) + public void testLockFeatureFlagAnnotation() { + assertTrue(FeatureFlags.isEnabled(TEST_FLAG)); + FeatureFlags.initializeFeatureFlags(Settings.builder().put(TEST_FLAG, false).build()); + assertTrue(FeatureFlags.isEnabled(TEST_FLAG)); // flag is locked } } diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index 8b0a455353c5f..df03120d06fb0 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -46,7 +46,6 @@ import org.opensearch.plugins.ExtensionAwarePlugin; import org.opensearch.rest.RestController; import org.opensearch.telemetry.tracing.noop.NoopTracer; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpNodeClient; @@ -74,6 +73,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.opensearch.common.util.FeatureFlags.EXTENSIONS; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -86,6 +86,7 @@ import static org.mockito.Mockito.when; public class ExtensionsManagerTests extends OpenSearchTestCase { + private static FeatureFlags.TestUtils.FlagWriteLock ffLock = null; private TransportService transportService; private ActionModule actionModule; 
private DynamicActionRegistry dynamicActionRegistry; @@ -108,7 +109,7 @@ public class ExtensionsManagerTests extends OpenSearchTestCase { @Before public void setup() throws Exception { - FeatureFlagSetter.set(FeatureFlags.EXTENSIONS); + ffLock = new FeatureFlags.TestUtils.FlagWriteLock(EXTENSIONS); Settings settings = Settings.builder().put("cluster.name", "test").build(); transport = new MockNioTransport( settings, @@ -179,6 +180,7 @@ public void tearDown() throws Exception { super.tearDown(); transportService.close(); client.close(); + ffLock.close(); ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index e3684178a18ea..4fffab79e036b 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -45,7 +45,6 @@ import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -2751,6 +2750,7 @@ public void testRemoteRoutingTableNotInitializedWhenDisabled() { } } + @LockFeatureFlag(REMOTE_PUBLICATION_SETTING_KEY) public void testRemoteRoutingTableInitializedWhenEnabled() { Settings newSettings = Settings.builder() .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") @@ -2759,9 +2759,6 @@ public void testRemoteRoutingTableInitializedWhenEnabled() { .build(); clusterSettings.applySettings(newSettings); - Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_SETTING_KEY, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); - remoteClusterStateService = new RemoteClusterStateService( "test-node-id", repositoriesServiceSupplier, diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index bc505daa607c1..7ea9dd336ccb8 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -42,12 +42,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.search.pipeline.SearchPipelineService; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -60,6 +58,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; +import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.core.StringContains.containsString; 
@@ -997,9 +996,9 @@ public void testUpdateRemoteTranslogBufferInterval() { ); } + @LockFeatureFlag(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY) @SuppressForbidden(reason = "sets the SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY feature flag") public void testExtendedCompatibilityVersionForRemoteSnapshot() throws Exception { - FeatureFlagSetter.set(FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY); IndexMetadata metadata = newIndexMeta( "index", Settings.builder() diff --git a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java index 18a7bb03b0a59..dd754845d1f58 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java @@ -51,6 +51,7 @@ */ @LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") public abstract class AbstractStarTreeDVFormatTests extends BaseDocValuesFormatTestCase { + private static FeatureFlags.TestUtils.FlagWriteLock ffLock = null; MapperService mapperService = null; StarTreeFieldConfiguration.StarTreeBuildMode buildMode; @@ -67,13 +68,13 @@ public static Collection parameters() { } @BeforeClass - public static void createMapper() throws Exception { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(STAR_TREE_INDEX, "true").build()); + public static void createMapper() { + ffLock = new FeatureFlags.TestUtils.FlagWriteLock(STAR_TREE_INDEX); } @AfterClass public static void clearMapper() { - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + ffLock.close(); } @After diff --git a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java index 288822ab1589f..6979407690749 100644 --- a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java @@ -41,7 +41,6 @@ import org.opensearch.common.lucene.LuceneTests; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.index.IndexModule; @@ -51,7 +50,6 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.Store; import org.opensearch.index.translog.TranslogStats; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.IndexSettingsModule; import java.io.IOException; @@ -62,6 +60,7 @@ import java.util.function.Function; import static org.opensearch.common.lucene.index.OpenSearchDirectoryReader.getOpenSearchDirectoryReader; +import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; @@ -240,13 +239,13 @@ public void testReadOnly() throws IOException { } } + @LockFeatureFlag(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY) public void testReadOldIndices() throws Exception { IOUtils.close(engine, store); // The index has one document in it, so the checkpoint cannot be NO_OPS_PERFORMED final AtomicLong 
globalCheckpoint = new AtomicLong(0); Path tmp = createTempDir(); TestUtil.unzip(getClass().getResourceAsStream(LuceneTests.OLDER_VERSION_INDEX_ZIP_RELATIVE_PATH), tmp); - FeatureFlagSetter.set(FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( "index", Settings.builder() diff --git a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java index 45483ef51a5f9..8b2eb67481abf 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java @@ -556,23 +556,20 @@ public void testCompositeFields() throws Exception { ex.getMessage() ); - final Settings starTreeEnabledSettings = Settings.builder().put(STAR_TREE_INDEX, "true").build(); - FeatureFlags.initializeFeatureFlags(starTreeEnabledSettings); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService() - .documentMapperParser() - .parse("tweet", new CompressedXContent(mapping)); - - Mapper mapper = documentMapper.root().getMapper("startree"); - assertTrue(mapper instanceof StarTreeMapper); - StarTreeMapper starTreeMapper = (StarTreeMapper) mapper; - assertEquals("star_tree", starTreeMapper.fieldType().typeName()); - // Check that field in properties was parsed correctly as well - mapper = documentMapper.root().getMapper("@timestamp"); - assertNotNull(mapper); - assertEquals("date", mapper.typeName()); - - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + FeatureFlags.TestUtils.with(STAR_TREE_INDEX, () -> { + DocumentMapper documentMapper = createIndex("test", settings).mapperService() + .documentMapperParser() + .parse("tweet", new CompressedXContent(mapping)); + + Mapper mapper = documentMapper.root().getMapper("startree"); + assertTrue(mapper instanceof StarTreeMapper); + StarTreeMapper starTreeMapper = (StarTreeMapper) mapper; + assertEquals("star_tree", starTreeMapper.fieldType().typeName()); + // Check that field in properties was parsed correctly as well + mapper = documentMapper.root().getMapper("@timestamp"); + assertNotNull(mapper); + assertEquals("date", mapper.typeName()); + }); } public void testNestedIsParent() throws Exception { diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 435621548942b..b355aaea24121 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -43,6 +43,7 @@ import java.util.List; import java.util.Set; +import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING; import static org.opensearch.index.compositeindex.CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING; @@ -53,15 +54,16 @@ * Tests for {@link StarTreeMapper}. 
*/ public class StarTreeMapperTests extends MapperTestCase { + FeatureFlags.TestUtils.FlagWriteLock ffLock = null; @Before public void setup() { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + ffLock = new FeatureFlags.TestUtils.FlagWriteLock(STAR_TREE_INDEX); } @After public void teardown() { - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + ffLock.close(); } @Override diff --git a/server/src/test/java/org/opensearch/index/store/StoreTests.java b/server/src/test/java/org/opensearch/index/store/StoreTests.java index 542f95a4894e2..22f7f1fd31997 100644 --- a/server/src/test/java/org/opensearch/index/store/StoreTests.java +++ b/server/src/test/java/org/opensearch/index/store/StoreTests.java @@ -72,7 +72,6 @@ import org.opensearch.common.lucene.LuceneTests; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; @@ -90,7 +89,6 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; import org.opensearch.test.DummyShardLock; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; @@ -113,6 +111,7 @@ import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.unmodifiableMap; +import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; import static org.opensearch.test.VersionUtils.randomVersion; @@ -1282,6 +1281,7 @@ public void testSegmentReplicationDiff() { assertTrue(diff.identical.isEmpty()); } + @LockFeatureFlag(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY) @SuppressForbidden(reason = "sets the SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY feature flag") public void testReadSegmentsFromOldIndices() throws Exception { int expectedIndexCreatedVersionMajor = SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION.luceneVersion.major; @@ -1291,7 +1291,6 @@ public void testReadSegmentsFromOldIndices() throws Exception { Store store = null; try { - FeatureFlagSetter.set(FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( "index", Settings.builder() diff --git a/server/src/test/java/org/opensearch/node/NodeTests.java b/server/src/test/java/org/opensearch/node/NodeTests.java index 2f769dbd51b0a..999586f4f8639 100644 --- a/server/src/test/java/org/opensearch/node/NodeTests.java +++ b/server/src/test/java/org/opensearch/node/NodeTests.java @@ -42,7 +42,6 @@ import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.BoundTransportAddress; @@ -69,7 +68,6 @@ import 
org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.telemetry.tracing.Tracer; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockHttpTransport; import org.opensearch.test.NodeRoles; @@ -94,6 +92,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import static org.opensearch.test.NodeRoles.addRoles; import static org.opensearch.test.NodeRoles.dataNode; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -436,11 +435,11 @@ public void testTelemetryAwarePlugins() throws IOException { } } + @LockFeatureFlag(TELEMETRY) public void testTelemetryPluginShouldNOTImplementTelemetryAwarePlugin() throws IOException { Settings.Builder settings = baseSettings(); List> plugins = basePlugins(); plugins.add(MockTelemetryPlugin.class); - FeatureFlagSetter.set(FeatureFlags.TELEMETRY); settings.put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true); assertThrows(IllegalStateException.class, () -> new MockNode(settings.build(), plugins)); } diff --git a/server/src/test/java/org/opensearch/search/SearchModuleTests.java b/server/src/test/java/org/opensearch/search/SearchModuleTests.java index d78393e917b2f..658e7bd2297c4 100644 --- a/server/src/test/java/org/opensearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/opensearch/search/SearchModuleTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.CharsRefBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -438,7 +437,6 @@ public void testConcurrentQueryPhaseSearcher() { QueryPhase queryPhase = searchModule.getQueryPhase(); assertTrue(queryPhase.getQueryPhaseSearcher() instanceof QueryPhaseSearcherWrapper); assertTrue(queryPhase.getQueryPhaseSearcher().aggregationProcessor(searchContext) instanceof ConcurrentAggregationProcessor); - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); } public void testPluginQueryPhaseSearcher() { @@ -454,7 +452,6 @@ public Optional getQueryPhaseSearcher() { TestSearchContext searchContext = new TestSearchContext(null); assertEquals(queryPhaseSearcher, queryPhase.getQueryPhaseSearcher()); assertTrue(queryPhaseSearcher.aggregationProcessor(searchContext) instanceof DefaultAggregationProcessor); - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); } public void testMultiplePluginRegisterQueryPhaseSearcher() { diff --git a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java index 9038f194843e3..4332274f9255f 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java @@ -15,7 +15,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Rounding; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import 
org.opensearch.index.IndexService; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; @@ -69,6 +68,7 @@ import java.util.List; import java.util.Set; +import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; import static org.opensearch.search.aggregations.AggregationBuilders.medianAbsoluteDeviation; @@ -93,8 +93,8 @@ public class SearchServiceStarTreeTests extends OpenSearchSingleNodeTestCase { /** * Test query parsing for non-nested metric aggregations, with/without numeric term query */ + @LockFeatureFlag(STAR_TREE_INDEX) public void testQueryParsingForMetricAggregations() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); setStarTreeIndexSetting("true"); Settings settings = Settings.builder() @@ -244,8 +244,8 @@ public void testQueryParsingForMetricAggregations() throws IOException { /** * Test query parsing for date histogram aggregations, with/without numeric term query */ + @LockFeatureFlag(STAR_TREE_INDEX) public void testQueryParsingForDateHistogramAggregations() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); setStarTreeIndexSetting("true"); Settings settings = Settings.builder() @@ -493,8 +493,8 @@ public void testCacheCreationInStarTreeQueryContext() throws IOException { /** * Test query parsing for date histogram aggregations on star-tree index when @timestamp field does not exist */ + @LockFeatureFlag(STAR_TREE_INDEX) public void testInvalidQueryParsingForDateHistogramAggregations() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); setStarTreeIndexSetting("true"); Settings settings = Settings.builder() @@ -547,8 +547,8 @@ public void testInvalidQueryParsingForDateHistogramAggregations() throws IOExcep /** * Test query parsing for bucket aggregations, with/without numeric term query */ + @LockFeatureFlag(STAR_TREE_INDEX) public void testQueryParsingForBucketAggregations() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); setStarTreeIndexSetting("true"); Settings settings = Settings.builder() diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java index a374e2f5653b9..0a6a775ac57b5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java @@ -28,7 +28,6 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.Rounding; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; @@ -59,6 +58,7 @@ import java.util.List; import java.util.Random; +import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; import static org.opensearch.index.codec.composite912.datacube.startree.AbstractStarTreeDVFormatTests.topMapping; import static 
org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.count; @@ -69,6 +69,7 @@ import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; public class DateHistogramAggregatorTests extends DateHistogramAggregatorTestCase { + private static FeatureFlags.TestUtils.FlagWriteLock fflock = null; private static final String TIMESTAMP_FIELD = "@timestamp"; private static final MappedFieldType TIMESTAMP_FIELD_TYPE = new DateFieldMapper.DateFieldType(TIMESTAMP_FIELD); @@ -80,12 +81,12 @@ public class DateHistogramAggregatorTests extends DateHistogramAggregatorTestCas @Before public void setup() { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + fflock = new FeatureFlags.TestUtils.FlagWriteLock(STAR_TREE_INDEX); } @After public void teardown() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + fflock.close(); } protected Codec getCodec() { diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java index 2ca9f6b592a0d..0cfaa52103539 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/KeywordTermsAggregatorTests.java @@ -31,7 +31,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; @@ -60,6 +59,7 @@ import java.util.List; import java.util.Random; +import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.count; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -69,6 +69,7 @@ import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; public class KeywordTermsAggregatorTests extends AggregatorTestCase { + private static FeatureFlags.TestUtils.FlagWriteLock fflock = null; final static String STATUS = "status"; final static String SIZE = "size"; final static String CLIENTIP = "clientip"; @@ -81,12 +82,12 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { @Before public void setup() { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + fflock = new FeatureFlags.TestUtils.FlagWriteLock(STAR_TREE_INDEX); } @After public void teardown() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + fflock.close(); } protected Codec getCodec() { diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java index 6e10562c3a846..0f0db9907d381 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java @@ -91,6 +91,7 @@ import java.util.stream.Collectors; import 
java.util.stream.Stream; +import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.count; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -101,19 +102,19 @@ import static org.mockito.Mockito.when; public class MetricAggregatorTests extends AggregatorTestCase { - + private static FeatureFlags.TestUtils.FlagWriteLock fflock = null; private static final String FIELD_NAME = "field"; private static final NumberFieldMapper.NumberType DEFAULT_FIELD_TYPE = NumberFieldMapper.NumberType.LONG; private static final MappedFieldType DEFAULT_MAPPED_FIELD = new NumberFieldMapper.NumberFieldType(FIELD_NAME, DEFAULT_FIELD_TYPE); @Before public void setup() { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + fflock = new FeatureFlags.TestUtils.FlagWriteLock(STAR_TREE_INDEX); } @After public void teardown() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + fflock.close(); } protected Codec getCodec( diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java index 3c663ada97e6d..b4b8f1f9883d8 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/NumericTermsAggregatorTests.java @@ -27,7 +27,6 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; @@ -54,6 +53,7 @@ import java.util.List; import java.util.Random; +import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; import static org.opensearch.index.codec.composite912.datacube.startree.AbstractStarTreeDVFormatTests.topMapping; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.count; @@ -64,6 +64,7 @@ import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; public class NumericTermsAggregatorTests extends AggregatorTestCase { + private static FeatureFlags.TestUtils.FlagWriteLock fflock = null; final static String STATUS = "status"; final static String SIZE = "size"; private static final MappedFieldType STATUS_FIELD_TYPE = new NumberFieldMapper.NumberFieldType( @@ -74,12 +75,12 @@ public class NumericTermsAggregatorTests extends AggregatorTestCase { @Before public void setup() { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + fflock = new FeatureFlags.TestUtils.FlagWriteLock(STAR_TREE_INDEX); } @After public void teardown() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + fflock.close(); } protected Codec getCodec() { diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java index 7282b0fafb8aa..cd2943f23be7a 100644 --- 
a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java @@ -24,7 +24,6 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.FixedBitSet; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; @@ -57,9 +56,11 @@ import org.mockito.Mockito; +import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; import static org.opensearch.index.codec.composite912.datacube.startree.AbstractStarTreeDVFormatTests.topMapping; public class StarTreeFilterTests extends AggregatorTestCase { + private static FeatureFlags.TestUtils.FlagWriteLock fflock = null; private static final String FIELD_NAME = "field"; private static final String SNDV = "sndv"; @@ -78,12 +79,12 @@ public class StarTreeFilterTests extends AggregatorTestCase { @Before public void setup() { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + fflock = new FeatureFlags.TestUtils.FlagWriteLock(STAR_TREE_INDEX); } @After public void teardown() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + fflock.close(); } protected Codec getCodec(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) { diff --git a/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java b/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java deleted file mode 100644 index f698cd03c464f..0000000000000 --- a/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.test; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.util.concurrent.ConcurrentCollections; - -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Set; - -/** - * Helper class that wraps the lifecycle of setting and finally clearing of - * a {@link org.opensearch.common.util.FeatureFlags} string. 
- */ -public class FeatureFlagSetter { - - private static FeatureFlagSetter INSTANCE = null; - - private static synchronized FeatureFlagSetter getInstance() { - if (INSTANCE == null) { - INSTANCE = new FeatureFlagSetter(); - } - return INSTANCE; - } - - public static synchronized void set(String flag) { - getInstance().setFlag(flag); - } - - public static synchronized void clear() { - if (INSTANCE != null) { - INSTANCE.clearAll(); - INSTANCE = null; - } - } - - private static final Logger LOGGER = LogManager.getLogger(FeatureFlagSetter.class); - private final Set flags = ConcurrentCollections.newConcurrentSet(); - - @SuppressWarnings("removal") - @SuppressForbidden(reason = "Enables setting of feature flags") - private void setFlag(String flag) { - flags.add(flag); - AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(flag, "true")); - LOGGER.info("set feature_flag={}", flag); - } - - @SuppressWarnings("removal") - @SuppressForbidden(reason = "Clears the set feature flags") - private void clearAll() { - for (String flag : flags) { - AccessController.doPrivileged((PrivilegedAction) () -> System.clearProperty(flag)); - } - LOGGER.info("unset feature_flags={}", flags); - flags.clear(); - } -} diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index 0bd5d8afda91e..2525aef903298 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -85,6 +85,7 @@ import org.opensearch.common.time.DateUtils; import org.opensearch.common.time.FormatNames; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; @@ -146,11 +147,18 @@ import org.junit.Rule; import org.junit.internal.AssumptionViolatedException; import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; import java.io.IOException; import java.io.InputStream; import java.io.PrintWriter; import java.io.StringWriter; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; import java.math.BigInteger; import java.net.InetAddress; import java.net.UnknownHostException; @@ -234,6 +242,45 @@ public abstract class OpenSearchTestCase extends LuceneTestCase { private static final Collection nettyLoggedLeaks = new ArrayList<>(); private HeaderWarningAppender headerWarningAppender; + /** + * Define LockFeatureFlag annotation for unit tests. + * Enables and make a flag immutable for the duration of the test case. + * Flag returned to previous value on test exit. + * Usage: LockFeatureFlag("example.featureflag.setting.key.enabled") + */ + @Retention(RetentionPolicy.RUNTIME) + @Target({ ElementType.METHOD }) + public @interface LockFeatureFlag { + String value(); + } + + public static class AnnotatedFeatureFlagRule implements TestRule { + /** + * Wrap base test case with an + * @param base test case to execute. + * @param description annotated test description. 
+ */ + @Override + public Statement apply(Statement base, Description description) { + LockFeatureFlag annotation = description.getAnnotation(LockFeatureFlag.class); + if (annotation == null) { + return base; + } + String flagKey = annotation.value(); + return new Statement() { + @Override + public void evaluate() throws Throwable { + try (FeatureFlags.TestUtils.FlagWriteLock ignored = new FeatureFlags.TestUtils.FlagWriteLock(flagKey)) { + base.evaluate(); + } + } + }; + } + } + + @Rule + public AnnotatedFeatureFlagRule flagLockRule = new AnnotatedFeatureFlagRule(); + @AfterClass public static void resetPortCounter() { portGenerator.set(0); @@ -242,7 +289,6 @@ public static void resetPortCounter() { @Override public void tearDown() throws Exception { Schedulers.shutdownNow(); - FeatureFlagSetter.clear(); super.tearDown(); } @@ -1233,7 +1279,7 @@ public static boolean terminate(ThreadPool threadPool) { } /** - * Returns a {@link java.nio.file.Path} pointing to the class path relative resource given + * Returns a {@link Path} pointing to the class path relative resource given * as the first argument. In contrast to * getClass().getResource(...).getFile() this method will not * return URL encoded paths if the parent path contains spaces or other @@ -1395,7 +1441,7 @@ protected final BytesReference toShuffledXContent( boolean humanReadable, String... exceptFieldNames ) throws IOException { - BytesReference bytes = org.opensearch.core.xcontent.XContentHelper.toXContent(toXContent, mediaType, params, humanReadable); + BytesReference bytes = XContentHelper.toXContent(toXContent, mediaType, params, humanReadable); try (XContentParser parser = createParser(mediaType.xContent(), bytes)) { try (XContentBuilder builder = shuffleXContent(parser, rarely(), exceptFieldNames)) { return BytesReference.bytes(builder); @@ -1687,8 +1733,8 @@ protected static long spinForAtLeastNMilliseconds(final long ms) { protected IndexAnalyzers createDefaultIndexAnalyzers() { return new IndexAnalyzers( Collections.singletonMap("default", new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer())), - Collections.emptyMap(), - Collections.emptyMap() + emptyMap(), + emptyMap() ); } From 7b6108bcf47fe2a3c75af9d7a1adb29a9c04938c Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 8 Apr 2025 22:53:24 -0700 Subject: [PATCH 173/550] Fix feature flag error in SearchSErviceStarTreeTests (#17858) --- .../java/org/opensearch/search/SearchServiceStarTreeTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java index 4332274f9255f..c0583cd5227b2 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java @@ -694,8 +694,8 @@ public void testQueryParsingForBucketAggregations() throws IOException { /** * Test query parsing for range aggregations, with/without numeric term query */ + @LockFeatureFlag(STAR_TREE_INDEX) public void testQueryParsingForRangeAggregations() throws IOException { - FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); setStarTreeIndexSetting("true"); Settings settings = Settings.builder() From 396add1a1c49d6350826ebd5831373f5f1ca0917 Mon Sep 17 00:00:00 2001 From: guojialiang Date: Thu, 10 Apr 2025 00:32:22 +0800 Subject: [PATCH 174/550] [segment replication] Avoid 
skewed segment replication lag metric (#17831) * avoid skewed sr lag metric Signed-off-by: guojialiang * add CHANGELOG Signed-off-by: guojialiang * add test Signed-off-by: guojialiang --------- Signed-off-by: guojialiang --- CHANGELOG.md | 1 + .../java/org/opensearch/index/seqno/ReplicationTracker.java | 3 ++- .../org/opensearch/index/seqno/ReplicationTrackerTests.java | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 73bac28902e21..fa608891d0092 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Unwrap singleton DocValues in global ordinal value source of composite histogram aggregation ([#17740](https://github.com/opensearch-project/OpenSearch/pull/17740)) - Unwrap singleton DocValues in date histogram aggregation. ([#17643](https://github.com/opensearch-project/OpenSearch/pull/17643)) - Introduce 512 byte limit to search and ingest pipeline IDs ([#17786](https://github.com/opensearch-project/OpenSearch/pull/17786)) +- Avoid skewed segment replication lag metric ([#17831](https://github.com/opensearch-project/OpenSearch/pull/17831)) ### Dependencies - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607), [#17669](https://github.com/opensearch-project/OpenSearch/pull/17669)) diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 76ef45158e3d5..36ef322b282f2 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1307,7 +1307,8 @@ public synchronized void startReplicationLagTimers(ReplicationCheckpoint checkpo && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false && shouldSkipReplicationTimer(e.getKey()) == false && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint) - && cps.checkpointTimers.containsKey(latestReplicationCheckpoint)) { + && cps.checkpointTimers.containsKey(latestReplicationCheckpoint) + && cps.checkpointTimers.get(latestReplicationCheckpoint).startTime() == 0) { cps.checkpointTimers.get(latestReplicationCheckpoint).start(); } }); diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 899e80965e4fd..b987c60dda333 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -1870,6 +1870,8 @@ public void testSegmentReplicationCheckpointTracking() { tracker.setLatestReplicationCheckpoint(initialCheckpoint); tracker.startReplicationLagTimers(initialCheckpoint); + // retry start replication lag timers + tracker.startReplicationLagTimers(initialCheckpoint); tracker.setLatestReplicationCheckpoint(secondCheckpoint); tracker.startReplicationLagTimers(secondCheckpoint); tracker.setLatestReplicationCheckpoint(thirdCheckpoint); From 4c3230ac45e15c3ec07e4f4ea9a50f05aa889542 Mon Sep 17 00:00:00 2001 From: Varun Bharadwaj Date: Wed, 9 Apr 2025 10:30:36 -0700 Subject: [PATCH 175/550] [Pull-based ingestion] Support updates and deletes in ingestion flow (#17822) * Support updates and deletes in ingestion flow Signed-off-by: Varun Bharadwaj * Move ID generation to common util 
Signed-off-by: Varun Bharadwaj --------- Signed-off-by: Varun Bharadwaj --- CHANGELOG.md | 1 + .../plugin/kafka/IngestFromKafkaIT.java | 69 ++++++++++++++++++- .../plugin/kafka/KafkaIngestionBaseIT.java | 11 ++- .../opensearch/action/index/IndexRequest.java | 4 +- .../opensearch/common/util/RequestUtils.java | 28 ++++++++ .../index/engine/IngestionEngine.java | 38 +++++++++- .../index/engine/InternalEngine.java | 2 +- .../MessageProcessorRunnable.java | 66 +++++++++++++----- .../common/util/RequestUtilsTests.java | 19 +++++ .../pollingingest/MessageProcessorTests.java | 27 ++++++++ 10 files changed, 237 insertions(+), 28 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/util/RequestUtils.java create mode 100644 server/src/test/java/org/opensearch/common/util/RequestUtilsTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index fa608891d0092..cd180673aaa3e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Star Tree] [Search] Resolving numeric range aggregation with metric aggregation using star-tree ([#17273](https://github.com/opensearch-project/OpenSearch/pull/17273)) - Added Search Only strict routing setting ([#17803](https://github.com/opensearch-project/OpenSearch/pull/17803)) - Disable the index API for ingestion engine ([#17768](https://github.com/opensearch-project/OpenSearch/pull/17768)) +- Add update and delete support in pull-based ingestion ([#17822](https://github.com/opensearch-project/OpenSearch/pull/17822)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java index 86d8710f4daab..ccecb08213772 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java @@ -15,7 +15,9 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.indices.pollingingest.PollingIngestStats; import org.opensearch.plugins.PluginInfo; import org.opensearch.test.OpenSearchIntegTestCase; @@ -73,8 +75,8 @@ public void testKafkaIngestion() { } public void testKafkaIngestion_RewindByTimeStamp() { - produceData("1", "name1", "24", 1739459500000L); - produceData("2", "name2", "20", 1739459800000L); + produceData("1", "name1", "24", 1739459500000L, "index"); + produceData("2", "name2", "20", 1739459800000L, "index"); // create an index with ingestion source from kafka createIndex( @@ -135,4 +137,67 @@ public void testCloseIndex() throws Exception { ensureGreen(indexName); client().admin().indices().close(Requests.closeIndexRequest(indexName)).get(); } + + public void testUpdateAndDelete() throws Exception { + // Step 1: Produce message and wait for it to be searchable + + produceData("1", "name", "25", defaultMessageTimestamp, "index"); + createIndexWithDefaultSettings(1, 0); + ensureGreen(indexName); + waitForState(() -> { + BoolQueryBuilder query = new 
BoolQueryBuilder().must(new TermQueryBuilder("_id", "1")); + SearchResponse response = client().prepareSearch(indexName).setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + return 25 == (Integer) response.getHits().getHits()[0].getSourceAsMap().get("age"); + }); + + // Step 2: Update age field from 25 to 30 and validate + + produceData("1", "name", "30", defaultMessageTimestamp, "index"); + waitForState(() -> { + BoolQueryBuilder query = new BoolQueryBuilder().must(new TermQueryBuilder("_id", "1")); + SearchResponse response = client().prepareSearch(indexName).setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + return 30 == (Integer) response.getHits().getHits()[0].getSourceAsMap().get("age"); + }); + + // Step 3: Delete the document and validate + produceData("1", "name", "30", defaultMessageTimestamp, "delete"); + waitForState(() -> { + BoolQueryBuilder query = new BoolQueryBuilder().must(new TermQueryBuilder("_id", "1")); + SearchResponse response = client().prepareSearch(indexName).setQuery(query).get(); + return response.getHits().getTotalHits().value() == 0; + }); + } + + public void testUpdateWithoutIDField() throws Exception { + // Step 1: Produce message without ID + String payload = "{\"_op_type\":\"index\",\"_source\":{\"name\":\"name\", \"age\": 25}}"; + produceData(payload); + + createIndexWithDefaultSettings(1, 0); + ensureGreen(indexName); + + waitForState(() -> { + BoolQueryBuilder query = new BoolQueryBuilder().must(new TermQueryBuilder("age", "25")); + SearchResponse response = client().prepareSearch(indexName).setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + return 25 == (Integer) response.getHits().getHits()[0].getSourceAsMap().get("age"); + }); + + SearchResponse searchableDocsResponse = client().prepareSearch(indexName).setSize(10).setPreference("_only_local").get(); + assertThat(searchableDocsResponse.getHits().getTotalHits().value(), is(1L)); + assertEquals(25, searchableDocsResponse.getHits().getHits()[0].getSourceAsMap().get("age")); + String id = searchableDocsResponse.getHits().getHits()[0].getId(); + + // Step 2: Produce an update message using retrieved ID and validate + + produceData(id, "name", "30", defaultMessageTimestamp, "index"); + waitForState(() -> { + BoolQueryBuilder query = new BoolQueryBuilder().must(new TermQueryBuilder("_id", id)); + SearchResponse response = client().prepareSearch(indexName).setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + return 30 == (Integer) response.getHits().getHits()[0].getSourceAsMap().get("age"); + }); + } } diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java index a9ae195332117..eb118c7bdbfce 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java @@ -93,20 +93,25 @@ private void stopKafka() { } protected void produceData(String id, String name, String age) { - produceData(id, name, age, defaultMessageTimestamp); + produceData(id, name, age, defaultMessageTimestamp, "index"); } - protected void produceData(String id, String name, String age, long timestamp) { + protected void produceData(String id, String name, 
String age, long timestamp, String opType) { String payload = String.format( Locale.ROOT, - "{\"_id\":\"%s\", \"_op_type:\":\"index\",\"_source\":{\"name\":\"%s\", \"age\": %s}}", + "{\"_id\":\"%s\", \"_op_type\":\"%s\",\"_source\":{\"name\":\"%s\", \"age\": %s}}", id, + opType, name, age ); producer.send(new ProducerRecord<>(topicName, null, timestamp, "null", payload)); } + protected void produceData(String payload) { + producer.send(new ProducerRecord<>(topicName, null, defaultMessageTimestamp, "null", payload)); + } + protected void waitForSearchableDocs(long docCount, List nodes) throws Exception { assertBusy(() -> { for (String node : nodes) { diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index 7a8e9a914ac57..7bff66f4e860e 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -46,9 +46,9 @@ import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Nullable; -import org.opensearch.common.UUIDs; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.util.RequestUtils; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; @@ -625,7 +625,7 @@ public void process(Version indexCreatedVersion, @Nullable MappingMetadata mappi assert ifSeqNo == UNASSIGNED_SEQ_NO; assert ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM; autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia - id(UUIDs.base64UUID()); + id(RequestUtils.generateID()); } } diff --git a/server/src/main/java/org/opensearch/common/util/RequestUtils.java b/server/src/main/java/org/opensearch/common/util/RequestUtils.java new file mode 100644 index 0000000000000..0affdac27d62b --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/RequestUtils.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.opensearch.common.UUIDs; + +/** + * Common utility methods for request handling. + * + * @opensearch.internal + */ +public final class RequestUtils { + + private RequestUtils() {} + + /** + * Generates a new ID field for new documents. 
+ */ + public static String generateID() { + return UUIDs.base64UUID(); + } +} diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index 4839b9ceb463b..6d5f112efe594 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -10,6 +10,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.opensearch.ExceptionsHelper; import org.opensearch.action.admin.indices.streamingingestion.state.ShardIngestionState; @@ -22,6 +23,8 @@ import org.opensearch.index.mapper.DocumentMapperForType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.ParseContext; +import org.opensearch.index.mapper.ParsedDocument; +import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.NoOpTranslogManager; import org.opensearch.index.translog.Translog; @@ -43,6 +46,7 @@ import java.util.Set; import java.util.function.BiFunction; +import static org.opensearch.action.index.IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; import static org.opensearch.index.translog.Translog.EMPTY_TRANSLOG_SNAPSHOT; /** @@ -163,8 +167,13 @@ public IndexResult indexInternal(Index index) throws IOException { } private IndexResult indexIntoLucene(Index index) throws IOException { - // todo: handle updates - addDocs(index.docs(), indexWriter); + if (index.getAutoGeneratedIdTimestamp() != UNSET_AUTO_GENERATED_TIMESTAMP) { + assert index.getAutoGeneratedIdTimestamp() >= 0 : "autoGeneratedIdTimestamp must be positive but was: " + + index.getAutoGeneratedIdTimestamp(); + addDocs(index.docs(), indexWriter); + } else { + updateDocs(index.uid(), index.docs(), indexWriter); + } return new IndexResult(index.version(), index.primaryTerm(), index.seqNo(), true); } @@ -176,11 +185,36 @@ private void addDocs(final List docs, final IndexWriter i } } + private void updateDocs(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { + if (docs.size() > 1) { + indexWriter.softUpdateDocuments(uid, docs, softDeletesField); + } else { + indexWriter.softUpdateDocument(uid, docs.get(0), softDeletesField); + } + } + @Override public DeleteResult delete(Delete delete) throws IOException { throw new IngestionEngineException("push-based deletion is not supported in ingestion engine, use streaming source instead"); } + /** + * Processes delete operations. This is used internally by the stream poller only. 
+ */ + public DeleteResult deleteInternal(Delete delete) throws IOException { + assert Objects.equals(delete.uid().field(), IdFieldMapper.NAME) : delete.uid().field(); + ensureOpen(); + final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newDeleteTombstoneDoc(delete.id()); + assert tombstone.docs().size() == 1 : "Tombstone doc should have single doc [" + tombstone + "]"; + final ParseContext.Document doc = tombstone.docs().get(0); + assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Delete tombstone document but _tombstone field is not set [" + + doc + + " ]"; + doc.add(softDeletesField); + indexWriter.softUpdateDocument(delete.uid(), doc, softDeletesField); + return new DeleteResult(1, delete.primaryTerm(), -1, true); + } + @Override public NoOpResult noOp(NoOp noOp) throws IOException { ensureOpen(); diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 064e757c6ebb7..7e171e3f1714c 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -161,6 +161,7 @@ public class InternalEngine extends Engine { protected final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1); protected final SoftDeletesPolicy softDeletesPolicy; protected final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); + protected final NumericDocValuesField softDeletesField = Lucene.newSoftDeletesField(); @Nullable protected final String historyUUID; @@ -197,7 +198,6 @@ public class InternalEngine extends Engine { private final CounterMetric numDocDeletes = new CounterMetric(); private final CounterMetric numDocAppends = new CounterMetric(); private final CounterMetric numDocUpdates = new CounterMetric(); - private final NumericDocValuesField softDeletesField = Lucene.newSoftDeletesField(); private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener; private final CompletionStatsCache completionStatsCache; diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java index 23aa1a043d774..28de7224f9d89 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java @@ -15,8 +15,10 @@ import org.opensearch.action.DocWriteRequest; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.util.RequestUtils; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -38,6 +40,7 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; +import static org.opensearch.action.index.IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; /** @@ -122,7 +125,9 @@ protected void process(Message message, IngestionShardPointer pointer) { engine.indexInternal((Engine.Index) operation); break; case DELETE: - engine.delete((Engine.Delete) operation); + 
engine.deleteInternal((Engine.Delete) operation); + break; + case NO_OP: break; default: throw new IllegalArgumentException("Invalid operation: " + operation); @@ -140,15 +145,23 @@ protected void process(Message message, IngestionShardPointer pointer) { * @return the engine operation */ protected Engine.Operation getOperation(byte[] payload, IngestionShardPointer pointer) throws IOException { - BytesReference payloadBR = new BytesArray(payload); - Map payloadMap = XContentHelper.convertToMap(payloadBR, false, MediaTypeRegistry.xContentType(payloadBR)).v2(); + Map payloadMap = getParsedPayloadMap(payload); - String id = (String) payloadMap.getOrDefault(ID, "null"); if (payloadMap.containsKey(OP_TYPE) && !(payloadMap.get(OP_TYPE) instanceof String)) { // TODO: add metric logger.error("_op_type field is of type {} but not string, skipping the message", payloadMap.get(OP_TYPE).getClass()); return null; } + + String id = (String) payloadMap.get(ID); + long autoGeneratedIdTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP; + if (Strings.isNullOrEmpty(id)) { + // auto generate ID for the message + id = RequestUtils.generateID(); + payloadMap.put(ID, id); + autoGeneratedIdTimestamp = System.currentTimeMillis(); + } + String opTypeString = (String) payloadMap.getOrDefault(OP_TYPE, "index"); DocWriteRequest.OpType opType = DocWriteRequest.OpType.fromString(opTypeString); @@ -177,7 +190,7 @@ protected Engine.Operation getOperation(byte[] payload, IngestionShardPointer po document.add(new StoredField(IngestionShardPointer.OFFSET_FIELD, pointer.asString())); operation = new Engine.Index( - new Term("_id", id), + new Term(IdFieldMapper.NAME, Uid.encodeId(id)), doc, 0, 1, @@ -185,25 +198,36 @@ protected Engine.Operation getOperation(byte[] payload, IngestionShardPointer po VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), - System.currentTimeMillis(), + autoGeneratedIdTimestamp, false, UNASSIGNED_SEQ_NO, 0 ); break; case DELETE: - operation = new Engine.Delete( - id, - new Term(IdFieldMapper.NAME, Uid.encodeId(id)), - 0, - 1, - Versions.MATCH_ANY, - VersionType.INTERNAL, - Engine.Operation.Origin.PRIMARY, - System.nanoTime(), - UNASSIGNED_SEQ_NO, - 0 - ); + if (autoGeneratedIdTimestamp != UNSET_AUTO_GENERATED_TIMESTAMP) { + logger.info("Delete operation without ID received, and will be dropped."); + operation = new Engine.NoOp( + 0, + 1, + Engine.Operation.Origin.PRIMARY, + System.nanoTime(), + "Delete operation is missing ID" + ); + } else { + operation = new Engine.Delete( + id, + new Term(IdFieldMapper.NAME, Uid.encodeId(id)), + 0, + 1, + Versions.MATCH_ANY, + VersionType.INTERNAL, + Engine.Operation.Origin.PRIMARY, + System.nanoTime(), + UNASSIGNED_SEQ_NO, + 0 + ); + } break; default: logger.error("Unsupported operation type {}", opType); @@ -212,6 +236,12 @@ protected Engine.Operation getOperation(byte[] payload, IngestionShardPointer po return operation; } + + private Map getParsedPayloadMap(byte[] payload) { + BytesReference payloadBR = new BytesArray(payload); + Map payloadMap = XContentHelper.convertToMap(payloadBR, false, MediaTypeRegistry.xContentType(payloadBR)).v2(); + return payloadMap; + } } private static BytesReference convertToBytes(Object object) throws IOException { diff --git a/server/src/test/java/org/opensearch/common/util/RequestUtilsTests.java b/server/src/test/java/org/opensearch/common/util/RequestUtilsTests.java new file mode 100644 index 0000000000000..091b22626e1a6 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/util/RequestUtilsTests.java @@ 
-0,0 +1,19 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.opensearch.core.common.Strings; +import org.opensearch.test.OpenSearchTestCase; + +public class RequestUtilsTests extends OpenSearchTestCase { + + public void testGenerateID() { + assertTrue(Strings.hasText(RequestUtils.generateID())); + } +} diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/MessageProcessorTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/MessageProcessorTests.java index 273e25c0a5bfc..8eb6c2d0ff161 100644 --- a/server/src/test/java/org/opensearch/indices/pollingingest/MessageProcessorTests.java +++ b/server/src/test/java/org/opensearch/indices/pollingingest/MessageProcessorTests.java @@ -96,4 +96,31 @@ public void testUnsupportedOperation() throws IOException { Engine.Operation operation = processor.getOperation(payload, pointer); assertNull(operation); } + + public void testAutoGeneratedIDForIndexOperation() throws IOException { + byte[] payload = "{\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8); + FakeIngestionSource.FakeIngestionShardPointer pointer = new FakeIngestionSource.FakeIngestionShardPointer(0); + + ParsedDocument parsedDocument = mock(ParsedDocument.class); + when(documentMapper.parse(any())).thenReturn(parsedDocument); + when(parsedDocument.rootDoc()).thenReturn(new ParseContext.Document()); + Engine.Operation operation = processor.getOperation(payload, pointer); + + assertTrue(operation instanceof Engine.Index); + ArgumentCaptor captor = ArgumentCaptor.forClass(SourceToParse.class); + verify(documentMapper).parse(captor.capture()); + assertEquals("index", captor.getValue().index()); + assertNotNull(captor.getValue().id()); + } + + public void testDeleteWithMissingID() throws IOException { + byte[] payload = "{\"_op_type\":\"delete\"}".getBytes(StandardCharsets.UTF_8); + FakeIngestionSource.FakeIngestionShardPointer pointer = new FakeIngestionSource.FakeIngestionShardPointer(0); + + ParsedDocument parsedDocument = mock(ParsedDocument.class); + when(documentMapper.parse(any())).thenReturn(parsedDocument); + when(parsedDocument.rootDoc()).thenReturn(new ParseContext.Document()); + Engine.Operation operation = processor.getOperation(payload, pointer); + assertTrue(operation instanceof Engine.NoOp); + } } From 18b0d1c7dbb756e52274cad2098664bdef7cacc5 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 9 Apr 2025 13:59:42 -0400 Subject: [PATCH 176/550] [Security Manager Replacement] Phase off SecurityManager usage in favor of Java Agent (#17861) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + build.gradle | 11 +- buildSrc/build.gradle | 4 +- .../gradle/OpenSearchTestBasePlugin.java | 3 - .../org/opensearch/bootstrap/test.policy | 1 + distribution/archives/build.gradle | 9 + distribution/build.gradle | 12 + distribution/src/config/jvm.options | 8 +- .../tools/launchers/SystemJvmOptions.java | 10 - gradle/ide.gradle | 4 +- libs/agent-sm/agent/build.gradle | 4 + .../java/org/opensearch/javaagent/Agent.java | 2 +- libs/build.gradle | 25 +- libs/secure-sm/build.gradle | 1 + .../org/opensearch/secure_sm/SecureSM.java | 312 ------------------ .../SecuredForkJoinWorkerThreadFactory.java | 47 --- .../secure_sm/ThreadContextPermission.java | 1 - .../secure_sm/ThreadPermission.java | 5 - 
.../opensearch/secure_sm/SecureSMTests.java | 156 --------- plugins/repository-hdfs/build.gradle | 4 - .../org/opensearch/bootstrap/test.policy | 12 + .../org/opensearch/bootstrap/test.policy | 12 + server/build.gradle | 5 +- .../opensearch/bootstrap/BootstrapChecks.java | 8 +- .../org/opensearch/bootstrap/OpenSearch.java | 13 - .../org/opensearch/bootstrap/Security.java | 29 +- .../common/util/concurrent/ThreadContext.java | 19 -- .../org/opensearch/bootstrap/security.policy | 24 ++ .../bootstrap/test-framework.policy | 10 +- .../ExceptionSerializationTests.java | 6 +- .../org/opensearch/bootstrap/test.policy | 6 + test/framework/build.gradle | 8 +- .../org/opensearch/bootstrap/AgentAttach.java | 20 ++ .../bootstrap/BootstrapForTesting.java | 32 +- .../mockito/plugin/PriviledgedMockMaker.java | 134 -------- .../org.mockito.plugins.MockMaker | 2 +- .../org/opensearch/bootstrap/test.policy | 16 + 37 files changed, 209 insertions(+), 767 deletions(-) delete mode 100644 libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java delete mode 100644 libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java delete mode 100644 libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java create mode 100644 plugins/repository-hdfs/src/test/resources/org/opensearch/bootstrap/test.policy create mode 100644 plugins/repository-s3/src/internalClusterTest/resources/org/opensearch/bootstrap/test.policy create mode 100644 test/framework/src/main/java/org/opensearch/bootstrap/AgentAttach.java delete mode 100644 test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java create mode 100644 test/framework/src/test/resources/org/opensearch/bootstrap/test.policy diff --git a/CHANGELOG.md b/CHANGELOG.md index cd180673aaa3e..fb9858ff83534 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Security Manager Replacement] Add a policy parser for Java agent security policies ([#17753](https://github.com/opensearch-project/OpenSearch/pull/17753)) - [Security Manager Replacement] Implement File Interceptor and add integration tests ([#17760](https://github.com/opensearch-project/OpenSearch/pull/17760)) - [Security Manager Replacement] Enhance Java Agent to intercept Runtime::halt ([#17757](https://github.com/opensearch-project/OpenSearch/pull/17757)) +- [Security Manager Replacement] Phase off SecurityManager usage in favor of Java Agent ([#17861](https://github.com/opensearch-project/OpenSearch/pull/17861)) - Support AutoExpand for SearchReplica ([#17741](https://github.com/opensearch-project/OpenSearch/pull/17741)) - Implement fixed interval refresh task scheduling ([#17777](https://github.com/opensearch-project/OpenSearch/pull/17777)) - Add GRPC DocumentService and Bulk endpoint ([#17727](https://github.com/opensearch-project/OpenSearch/pull/17727)) diff --git a/build.gradle b/build.gradle index 187574da9e62a..0ac56b1389245 100644 --- a/build.gradle +++ b/build.gradle @@ -433,11 +433,12 @@ gradle.projectsEvaluated { project.tasks.withType(Test) { task -> if (task != null) { - if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) { - task.jvmArgs += ["-Djava.security.manager=allow"] - } - if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_20) { - task.jvmArgs += ["--add-modules=jdk.incubator.vector"] + task.jvmArgs += ["--add-modules=jdk.incubator.vector"] + + // Add Java Agent for security sandboxing + if (!(project.path in 
[':build-tools', ":libs:agent-sm:bootstrap", ":libs:agent-sm:agent"])) { + dependsOn(project(':libs:agent-sm:agent').prepareAgent) + jvmArgs += ["-javaagent:" + project(':libs:agent-sm:agent').jar.archiveFile.get()] } } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 65986f2361c9d..e8459443e8a04 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -110,12 +110,12 @@ dependencies { api 'com.netflix.nebula:gradle-info-plugin:12.1.6' api 'org.apache.rat:apache-rat:0.15' api "commons-io:commons-io:${props.getProperty('commonsio')}" - api "net.java.dev.jna:jna:5.14.0" + api "net.java.dev.jna:jna:5.16.0" api 'com.gradleup.shadow:shadow-gradle-plugin:8.3.5' api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" api 'de.thetaphi:forbiddenapis:3.8' - api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.6' + api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.12' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.6' api 'com.networknt:json-schema-validator:1.2.0' diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java index d79dfb1124757..55f71753a2e37 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java @@ -115,9 +115,6 @@ public void execute(Task t) { test.jvmArgs("--illegal-access=warn"); } } - if (test.getJavaVersion().compareTo(JavaVersion.VERSION_17) > 0) { - test.jvmArgs("-Djava.security.manager=allow"); - } } }); test.getJvmArgumentProviders().add(nonInputProperties); diff --git a/client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy b/client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy index 2604c2492d8ab..96cd3e9f148cf 100644 --- a/client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy +++ b/client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy @@ -8,4 +8,5 @@ grant { permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 792b1ab57ddbc..f42dc422cb938 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -38,6 +38,9 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla into('lib') { with libFiles() } + into('agent') { + with agentFiles() + } into('config') { dirPermissions { unix 0750 @@ -226,3 +229,9 @@ subprojects { group = "org.opensearch.distribution" } + +tasks.each { + if (it.name.startsWith("build")) { + it.dependsOn project(':libs:agent-sm:agent').assemble + } +} diff --git a/distribution/build.gradle b/distribution/build.gradle index 8fe9a89059a50..e863d5ab21fe0 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -357,6 +357,18 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } + agentFiles = { + copySpec { + from(project(':libs:agent-sm:agent').prepareAgent) { + include '**/*.jar' + exclude '**/*-javadoc.jar' + exclude '**/*-sources.jar' + // strip the version since jvm.options is using agent without version + rename("opensearch-agent-${project.version}.jar", "opensearch-agent.jar") + } + } + } + modulesFiles = { platform -> copySpec { eachFile 
{ diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index a8c96f33ce51d..54222d07634fc 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -76,16 +76,12 @@ ${error.file} # JDK 9+ GC logging 9-:-Xlog:gc*,gc+age=trace,safepoint:file=${loggc}:utctime,pid,tags:filecount=32,filesize=64m -# Explicitly allow security manager (https://bugs.openjdk.java.net/browse/JDK-8270380) -18-:-Djava.security.manager=allow - # JDK 20+ Incubating Vector Module for SIMD optimizations; # disabling may reduce performance on vector optimized lucene 20-:--add-modules=jdk.incubator.vector -# HDFS ForkJoinPool.common() support by SecurityManager --Djava.util.concurrent.ForkJoinPool.common.threadFactory=org.opensearch.secure_sm.SecuredForkJoinWorkerThreadFactory - # See please https://bugs.openjdk.org/browse/JDK-8341127 (openjdk/jdk#21283) 23:-XX:CompileCommand=dontinline,java/lang/invoke/MethodHandle.setAsTypeCache 23:-XX:CompileCommand=dontinline,java/lang/invoke/MethodHandle.asTypeUncached + +21-:-javaagent:agent/opensearch-agent.jar diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java index af7138569972a..3dc6725918fa2 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java @@ -77,21 +77,11 @@ static List systemJvmOptions() { // log4j 2 "-Dlog4j.shutdownHookEnabled=false", "-Dlog4j2.disable.jmx=true", - // security manager - allowSecurityManagerOption(), javaLocaleProviders() ) ).stream().filter(e -> e.isEmpty() == false).collect(Collectors.toList()); } - private static String allowSecurityManagerOption() { - if (Runtime.version().feature() > 17) { - return "-Djava.security.manager=allow"; - } else { - return ""; - } - } - private static String maybeShowCodeDetailsInExceptionMessages() { if (Runtime.version().feature() >= 14) { return "-XX:+ShowCodeDetailsInExceptionMessages"; diff --git a/gradle/ide.gradle b/gradle/ide.gradle index aa86e47dcabde..4ea95e94d8f33 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -82,9 +82,7 @@ if (System.getProperty('idea.active') == 'true') { runConfigurations { defaults(JUnit) { vmParameters = '-ea -Djava.locale.providers=SPI,CLDR' - if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) { - vmParameters += ' -Djava.security.manager=allow' - } + vmParameters += ' -javaagent:' + project(':libs:agent-sm:agent').jar.archiveFile.get() } } copyright { diff --git a/libs/agent-sm/agent/build.gradle b/libs/agent-sm/agent/build.gradle index fb2c71af42d74..7383af76fd3bc 100644 --- a/libs/agent-sm/agent/build.gradle +++ b/libs/agent-sm/agent/build.gradle @@ -75,3 +75,7 @@ tasks.test { tasks.check { dependsOn test } + +tasks.named('assemble') { + dependsOn prepareAgent +} diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java index 1226943cb6fed..f3129b6bb8b54 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/Agent.java @@ -101,7 +101,7 @@ private static AgentBuilder createAgentBuilder(Instrumentation inst) throws Exce final AgentBuilder agentBuilder = new 
AgentBuilder.Default(byteBuddy).with(AgentBuilder.InitializationStrategy.NoOp.INSTANCE) .with(AgentBuilder.RedefinitionStrategy.REDEFINITION) .with(AgentBuilder.TypeStrategy.Default.REDEFINE) - .ignore(ElementMatchers.none()) + .ignore(ElementMatchers.nameContains("$MockitoMock$")) /* ingore all Mockito mocks */ .type(systemType) .transform(socketTransformer) .type(pathType.or(fileChannelType)) diff --git a/libs/build.gradle b/libs/build.gradle index 9bf359d936178..32438176ad5a6 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -41,20 +41,21 @@ subprojects { */ project.afterEvaluate { if (!project.path.equals(':libs:agent-sm:agent')) { - configurations.all { Configuration conf -> - dependencies.matching { it instanceof ProjectDependency }.all { ProjectDependency dep -> - Project depProject = project.project(dep.path) - if (depProject != null - && (false == depProject.path.equals(':libs:opensearch-core') && - false == depProject.path.equals(':libs:opensearch-common')) - && depProject.path.startsWith(':libs')) { - throw new InvalidUserDataException("projects in :libs " - + "may not depend on other projects libs except " - + ":libs:opensearch-core or :libs:opensearch-common but " - + "${project.path} depends on ${depProject.path}") + configurations.all { Configuration conf -> + dependencies.matching { it instanceof ProjectDependency }.all { ProjectDependency dep -> + Project depProject = project.project(dep.path) + if (depProject != null + && (false == depProject.path.equals(':libs:opensearch-core') && + false == depProject.path.equals(':libs:opensearch-common')&& + false == depProject.path.equals(':libs:agent-sm:agent-policy')) + && depProject.path.startsWith(':libs')) { + throw new InvalidUserDataException("projects in :libs " + + "may not depend on other projects libs except " + + ":libs:opensearch-core, :libs:agent-sm:agent-policy or :libs:opensearch-common but " + + "${project.path} depends on ${depProject.path}") + } } } - } } } } diff --git a/libs/secure-sm/build.gradle b/libs/secure-sm/build.gradle index 7a0b06699bf35..9febde423f796 100644 --- a/libs/secure-sm/build.gradle +++ b/libs/secure-sm/build.gradle @@ -31,6 +31,7 @@ apply plugin: 'opensearch.publish' dependencies { // do not add non-test compile dependencies to secure-sm without a good reason to do so + api project(":libs:agent-sm:agent-policy") testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testImplementation "junit:junit:${versions.junit}" diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java deleted file mode 100644 index a2531f4a9156e..0000000000000 --- a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java +++ /dev/null @@ -1,312 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.secure_sm; - -import java.security.AccessController; -import java.security.Permission; -import java.security.PrivilegedAction; -import java.util.Arrays; -import java.util.Objects; -import java.util.Set; - -/** - * Extension of SecurityManager that works around a few design flaws in Java Security. - *

- * There are a few major problems that require custom {@code SecurityManager} logic to fix:
- * <ul>
- *   <li>{@code exitVM} permission is implicitly granted to all code by the default
- *       Policy implementation. For a server app, this is not wanted.</li>
- *   <li>ThreadGroups are not enforced by default, instead only system threads are
- *       protected out of box by {@code modifyThread/modifyThreadGroup}. Applications
- *       are encouraged to override the logic here to implement a stricter policy.</li>
- *   <li>System threads are not even really protected, because if the system uses
- *       ThreadPools, {@code modifyThread} is abused by its {@code shutdown} checks. This means
- *       a thread must have {@code modifyThread} to even terminate its own pool, leaving
- *       system threads unprotected.</li>
- * </ul>
- * This class throws exception on {@code exitVM} calls, and provides an allowlist where calls
- * from exit are allowed.
- * <p>
- * Additionally it enforces threadgroup security with the following rules:
- * <ul>
- *   <li>{@code modifyThread} and {@code modifyThreadGroup} are required for any thread access
- *       checks: with these permissions, access is granted as long as the thread group is
- *       the same or an ancestor ({@code sourceGroup.parentOf(targetGroup) == true}).</li>
- *   <li>code without these permissions can do very little, except to interrupt itself. It may
- *       not even create new threads.</li>
- *   <li>very special cases (like test runners) that have {@link ThreadPermission} can violate
- *       threadgroup security rules.</li>
- * </ul>

- * If java security debugging ({@code java.security.debug}) is enabled, and this SecurityManager - * is installed, it will emit additional debugging information when threadgroup access checks fail. - * - * @see SecurityManager#checkAccess(Thread) - * @see SecurityManager#checkAccess(ThreadGroup) - * @see - * http://cs.oswego.edu/pipermail/concurrency-interest/2009-August/006508.html - */ -@SuppressWarnings("removal") -public class SecureSM extends SecurityManager { - - private final String[] classesThatCanExit; - - /** - * Creates a new security manager where no packages can exit nor halt the virtual machine. - */ - public SecureSM() { - this(new String[0]); - } - - /** - * Creates a new security manager with the specified list of regular expressions as the those that class names will be tested against to - * check whether or not a class can exit or halt the virtual machine. - * - * @param classesThatCanExit the list of classes that can exit or halt the virtual machine - */ - public SecureSM(final String[] classesThatCanExit) { - this.classesThatCanExit = classesThatCanExit; - } - - /** - * Creates a new security manager with a standard set of test packages being the only packages that can exit or halt the virtual - * machine. The packages that can exit are: - *

- * <ul>
- *   <li>org.apache.maven.surefire.booter.</li>
- *   <li>com.carrotsearch.ant.tasks.junit4.</li>
- *   <li>org.eclipse.internal.junit.runner.</li>
- *   <li>com.intellij.rt.execution.junit.</li>
- * </ul>
- *
- * For testing purposes, the security manager grants network permissions "connect, accept"
- * to following classes, granted they only access local network interfaces.
- *
- * <ul>
- *   <li>sun.net.httpserver.ServerImpl</li>
- *   <li>java.net.ServerSocket"</li>
- *   <li>java.net.Socket</li>
- * </ul>
- * - * @return an instance of SecureSM where test packages can halt or exit the virtual machine - */ - public static SecureSM createTestSecureSM(final Set trustedHosts) { - return new SecureSM(TEST_RUNNER_PACKAGES) { - // Trust these callers inside the test suite only - final String[] TRUSTED_CALLERS = new String[] { "sun.net.httpserver.ServerImpl", "java.net.ServerSocket", "java.net.Socket" }; - - @Override - public void checkConnect(String host, int port) { - // Allow to connect from selected trusted classes to local addresses only - if (!hasTrustedCallerChain() || !trustedHosts.contains(host)) { - super.checkConnect(host, port); - } - } - - @Override - public void checkAccept(String host, int port) { - // Allow to accept connections from selected trusted classes to local addresses only - if (!hasTrustedCallerChain() || !trustedHosts.contains(host)) { - super.checkAccept(host, port); - } - } - - private boolean hasTrustedCallerChain() { - return Arrays.stream(getClassContext()) - .anyMatch(c -> Arrays.stream(TRUSTED_CALLERS).anyMatch(t -> c.getName().startsWith(t))); - } - }; - } - - static final String[] TEST_RUNNER_PACKAGES = new String[] { - // surefire test runner - "org\\.apache\\.maven\\.surefire\\.booter\\..*", - // junit4 test runner - "com\\.carrotsearch\\.ant\\.tasks\\.junit4\\.slave\\..*", - // eclipse test runner - "org\\.eclipse.jdt\\.internal\\.junit\\.runner\\..*", - // intellij test runner (before IDEA version 2019.3) - "com\\.intellij\\.rt\\.execution\\.junit\\..*", - // intellij test runner (since IDEA version 2019.3) - "com\\.intellij\\.rt\\.junit\\..*" }; - - // java.security.debug support - private static final boolean DEBUG = AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Boolean run() { - try { - String v = System.getProperty("java.security.debug"); - // simple check that they are trying to debug - return v != null && v.length() > 0; - } catch (SecurityException e) { - return false; - } - } - }); - - @Override - @SuppressForbidden(reason = "java.security.debug messages go to standard error") - public void checkAccess(Thread t) { - try { - checkThreadAccess(t); - } catch (SecurityException e) { - if (DEBUG) { - System.err.println("access: caller thread=" + Thread.currentThread()); - System.err.println("access: target thread=" + t); - debugThreadGroups(Thread.currentThread().getThreadGroup(), t.getThreadGroup()); - } - throw e; - } - } - - @Override - @SuppressForbidden(reason = "java.security.debug messages go to standard error") - public void checkAccess(ThreadGroup g) { - try { - checkThreadGroupAccess(g); - } catch (SecurityException e) { - if (DEBUG) { - System.err.println("access: caller thread=" + Thread.currentThread()); - debugThreadGroups(Thread.currentThread().getThreadGroup(), g); - } - throw e; - } - } - - @SuppressForbidden(reason = "java.security.debug messages go to standard error") - private void debugThreadGroups(final ThreadGroup caller, final ThreadGroup target) { - System.err.println("access: caller group=" + caller); - System.err.println("access: target group=" + target); - } - - // thread permission logic - - private static final Permission MODIFY_THREAD_PERMISSION = new RuntimePermission("modifyThread"); - private static final Permission MODIFY_ARBITRARY_THREAD_PERMISSION = new ThreadPermission("modifyArbitraryThread"); - - protected void checkThreadAccess(Thread t) { - Objects.requireNonNull(t); - - // first, check if we can modify threads at all. 
- checkPermission(MODIFY_THREAD_PERMISSION); - - // check the threadgroup, if its our thread group or an ancestor, its fine. - final ThreadGroup source = Thread.currentThread().getThreadGroup(); - final ThreadGroup target = t.getThreadGroup(); - - if (target == null) { - return; // its a dead thread, do nothing. - } else if (source.parentOf(target) == false) { - checkPermission(MODIFY_ARBITRARY_THREAD_PERMISSION); - } - } - - private static final Permission MODIFY_THREADGROUP_PERMISSION = new RuntimePermission("modifyThreadGroup"); - private static final Permission MODIFY_ARBITRARY_THREADGROUP_PERMISSION = new ThreadPermission("modifyArbitraryThreadGroup"); - - protected void checkThreadGroupAccess(ThreadGroup g) { - Objects.requireNonNull(g); - - // first, check if we can modify thread groups at all. - checkPermission(MODIFY_THREADGROUP_PERMISSION); - - // check the threadgroup, if its our thread group or an ancestor, its fine. - final ThreadGroup source = Thread.currentThread().getThreadGroup(); - final ThreadGroup target = g; - - if (source == null) { - return; // we are a dead thread, do nothing - } else if (source.parentOf(target) == false) { - checkPermission(MODIFY_ARBITRARY_THREADGROUP_PERMISSION); - } - } - - // exit permission logic - @Override - public void checkExit(int status) { - innerCheckExit(status); - } - - /** - * The "Uwe Schindler" algorithm. - * - * @param status the exit status - */ - protected void innerCheckExit(final int status) { - AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Void run() { - final String systemClassName = System.class.getName(), runtimeClassName = Runtime.class.getName(); - String exitMethodHit = null; - for (final StackTraceElement se : Thread.currentThread().getStackTrace()) { - final String className = se.getClassName(), methodName = se.getMethodName(); - if (("exit".equals(methodName) || "halt".equals(methodName)) - && (systemClassName.equals(className) || runtimeClassName.equals(className))) { - exitMethodHit = className + '#' + methodName + '(' + status + ')'; - continue; - } - - if (exitMethodHit != null) { - if (classesThatCanExit == null) { - break; - } - if (classCanExit(className, classesThatCanExit)) { - // this exit point is allowed, we return normally from closure: - return null; - } - // anything else in stack trace is not allowed, break and throw SecurityException below: - break; - } - } - - if (exitMethodHit == null) { - // should never happen, only if JVM hides stack trace - replace by generic: - exitMethodHit = "JVM exit method"; - } - throw new SecurityException(exitMethodHit + " calls are not allowed"); - } - }); - - // we passed the stack check, delegate to super, so default policy can still deny permission: - super.checkExit(status); - } - - static boolean classCanExit(final String className, final String[] classesThatCanExit) { - for (final String classThatCanExit : classesThatCanExit) { - if (className.matches(classThatCanExit)) { - return true; - } - } - return false; - } - -} diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java deleted file mode 100644 index 3c8e78a902fcb..0000000000000 --- a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be 
licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.secure_sm; - -import java.security.AccessControlContext; -import java.security.AccessController; -import java.security.Permission; -import java.security.Permissions; -import java.security.PrivilegedAction; -import java.security.ProtectionDomain; -import java.util.concurrent.ForkJoinPool; -import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory; -import java.util.concurrent.ForkJoinWorkerThread; - -@SuppressWarnings("removal") -public class SecuredForkJoinWorkerThreadFactory implements ForkJoinWorkerThreadFactory { - static AccessControlContext contextWithPermissions(Permission... perms) { - Permissions permissions = new Permissions(); - for (Permission perm : perms) - permissions.add(perm); - return new AccessControlContext(new ProtectionDomain[] { new ProtectionDomain(null, permissions) }); - } - - // ACC for access to the factory - private static final AccessControlContext ACC = contextWithPermissions( - new RuntimePermission("getClassLoader"), - new RuntimePermission("setContextClassLoader"), - new RuntimePermission("modifyThreadGroup"), - new RuntimePermission("modifyThread") - ); - - public final ForkJoinWorkerThread newThread(ForkJoinPool pool) { - return AccessController.doPrivileged(new PrivilegedAction<>() { - public ForkJoinWorkerThread run() { - return new ForkJoinWorkerThread(pool) { - - }; - } - }, ACC); - } -} diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java index 2f33eb513c165..9b0ea724a1288 100644 --- a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java @@ -14,7 +14,6 @@ * Permission to utilize methods in the ThreadContext class that are normally not accessible * * @see ThreadGroup - * @see SecureSM */ public final class ThreadContextPermission extends BasicPermission { diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadPermission.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadPermission.java index 67be8c132aa5e..15ce32100ec8d 100644 --- a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadPermission.java +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadPermission.java @@ -38,16 +38,11 @@ * Permission to modify threads or thread groups normally not accessible * to the current thread. *

- * {@link SecureSM} enforces ThreadGroup security: threads with
- * {@code RuntimePermission("modifyThread")} or {@code RuntimePermission("modifyThreadGroup")}
- * are only allowed to modify their current thread group or an ancestor of that group.
- * <p>
* In some cases (e.g. test runners), code needs to manipulate arbitrary threads, * so this Permission provides for that: the targets {@code modifyArbitraryThread} and * {@code modifyArbitraryThreadGroup} allow a thread blanket access to any group. * * @see ThreadGroup - * @see SecureSM */ public final class ThreadPermission extends BasicPermission { diff --git a/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java b/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java deleted file mode 100644 index fd666c70cfebb..0000000000000 --- a/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.secure_sm; - -import java.security.Permission; -import java.security.Policy; -import java.security.ProtectionDomain; -import java.util.Collections; -import java.util.concurrent.atomic.AtomicBoolean; - -import junit.framework.TestCase; - -/** Simple tests for SecureSM */ -@SuppressWarnings("removal") -public class SecureSMTests extends TestCase { - static { - // install a mock security policy: - // AllPermission to source code - // ThreadPermission not granted anywhere else - final ProtectionDomain sourceCode = SecureSM.class.getProtectionDomain(); - Policy.setPolicy(new Policy() { - @Override - public boolean implies(ProtectionDomain domain, Permission permission) { - if (domain == sourceCode) { - return true; - } else if (permission instanceof ThreadPermission) { - return false; - } - return true; - } - }); - System.setSecurityManager(SecureSM.createTestSecureSM(Collections.emptySet())); - } - - @SuppressForbidden(reason = "testing that System#exit is blocked") - public void testTryToExit() { - try { - System.exit(1); - fail("did not hit expected exception"); - } catch (SecurityException expected) {} - } - - public void testClassCanExit() { - assertTrue(SecureSM.classCanExit("org.apache.maven.surefire.booter.CommandReader", SecureSM.TEST_RUNNER_PACKAGES)); - assertTrue(SecureSM.classCanExit("com.carrotsearch.ant.tasks.junit4.slave.JvmExit", SecureSM.TEST_RUNNER_PACKAGES)); - assertTrue(SecureSM.classCanExit("org.eclipse.jdt.internal.junit.runner.RemoteTestRunner", SecureSM.TEST_RUNNER_PACKAGES)); - assertTrue(SecureSM.classCanExit("com.intellij.rt.execution.junit.JUnitStarter", SecureSM.TEST_RUNNER_PACKAGES)); - assertTrue(SecureSM.classCanExit("org.opensearch.Foo", new String[] { "org.opensearch.Foo" })); - assertFalse(SecureSM.classCanExit("org.opensearch.Foo", new String[] { "org.opensearch.Bar" })); - } - - public void testCreateThread() throws Exception { - Thread t = new Thread(); - t.start(); - t.join(); - // no exception - } - - public void testCreateThreadGroup() throws Exception { - Thread t = new Thread(new ThreadGroup("childgroup"), "child"); - t.start(); - t.join(); - // no exception - } - - public void testModifyChild() throws Exception { - final AtomicBoolean interrupted = new AtomicBoolean(false); - Thread t = new Thread(new ThreadGroup("childgroup"), "child") { - @Override - public void run() { - try { - Thread.sleep(Long.MAX_VALUE); - } catch (InterruptedException expected) { - interrupted.set(true); - } - } - }; - t.start(); - t.interrupt(); - t.join(); - // no exception - assertTrue(interrupted.get()); - } - - public void testNoModifySibling() throws Exception { - final AtomicBoolean interrupted1 = new AtomicBoolean(false); - final AtomicBoolean interrupted2 = new AtomicBoolean(false); - - final Thread t1 = new Thread(new ThreadGroup("childgroup"), "child") { - @Override - public void run() { - try { - Thread.sleep(Long.MAX_VALUE); - } catch (InterruptedException expected) { - interrupted1.set(true); - } - } - }; - t1.start(); - - Thread t2 = new Thread(new ThreadGroup("anothergroup"), "another child") { - @Override - public void run() { - try { - Thread.sleep(Long.MAX_VALUE); - } catch (InterruptedException expected) { - interrupted2.set(true); - try { - t1.interrupt(); // try to bogusly interrupt our sibling - fail("did not hit expected exception"); - } catch (SecurityException expected2) {} - } - } - }; - t2.start(); - t2.interrupt(); - t2.join(); - // sibling attempted to but was not able to muck with its other sibling - 
assertTrue(interrupted2.get()); - assertFalse(interrupted1.get()); - // but we are the parent and can terminate - t1.interrupt(); - t1.join(); - assertTrue(interrupted1.get()); - } -} diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index d3c92ac39f5b4..7c2488ff6b1a8 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -146,10 +146,6 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', } final List miniHDFSArgs = [] - if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_23) { - miniHDFSArgs.add('-Djava.security.manager=allow') - } - // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options if (fixtureName.equals('secureHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) { miniHDFSArgs.add("-Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")}"); diff --git a/plugins/repository-hdfs/src/test/resources/org/opensearch/bootstrap/test.policy b/plugins/repository-hdfs/src/test/resources/org/opensearch/bootstrap/test.policy new file mode 100644 index 0000000000000..7899f339e5732 --- /dev/null +++ b/plugins/repository-hdfs/src/test/resources/org/opensearch/bootstrap/test.policy @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +grant { + permission java.net.NetPermission "accessUnixDomainSocket"; + permission java.net.SocketPermission "*", "connect,resolve"; +}; diff --git a/plugins/repository-s3/src/internalClusterTest/resources/org/opensearch/bootstrap/test.policy b/plugins/repository-s3/src/internalClusterTest/resources/org/opensearch/bootstrap/test.policy new file mode 100644 index 0000000000000..7899f339e5732 --- /dev/null +++ b/plugins/repository-s3/src/internalClusterTest/resources/org/opensearch/bootstrap/test.policy @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +grant { + permission java.net.NetPermission "accessUnixDomainSocket"; + permission java.net.SocketPermission "*", "connect,resolve"; +}; diff --git a/server/build.gradle b/server/build.gradle index fd2cac4c7506f..d3e55c4d8f784 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -71,6 +71,7 @@ dependencies { api project(":libs:opensearch-task-commons") implementation project(':libs:opensearch-arrow-spi') + compileOnly project(":libs:agent-sm:bootstrap") compileOnly project(':libs:opensearch-plugin-classloader') testRuntimeOnly project(':libs:opensearch-plugin-classloader') @@ -377,9 +378,7 @@ tasks.named("licenseHeaders").configure { tasks.test { environment "node.roles.test", "[]" - if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_1_8) { - jvmArgs += ["--add-opens", "java.base/java.nio.file=ALL-UNNAMED"] - } + jvmArgs += ["--add-opens", "java.base/java.nio.file=ALL-UNNAMED", "-Djdk.attach.allowAttachSelf=true", "-XX:+EnableDynamicAgentLoading" ] } tasks.named("sourcesJar").configure { diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java index 8285f361ee220..b484c33fda5c9 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java @@ -47,6 +47,7 @@ import org.opensearch.discovery.DiscoveryModule; import org.opensearch.env.Environment; import org.opensearch.index.IndexModule; +import org.opensearch.javaagent.bootstrap.AgentPolicy; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.monitor.process.ProcessProbe; import org.opensearch.node.NodeRoleSettings; @@ -57,6 +58,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.security.AllPermission; +import java.security.Policy; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -720,10 +722,10 @@ public final BootstrapCheckResult check(BootstrapContext context) { @SuppressWarnings("removal") boolean isAllPermissionGranted() { - final SecurityManager sm = System.getSecurityManager(); - assert sm != null; + final Policy policy = AgentPolicy.getPolicy(); + assert policy != null; try { - sm.checkPermission(new AllPermission()); + AgentPolicy.checkPermission(new AllPermission()); } catch (final SecurityException e) { return false; } diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java index 162b9be318cd5..7b011b5828428 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java @@ -48,7 +48,6 @@ import java.io.IOException; import java.nio.file.Path; -import java.security.Permission; import java.security.Security; import java.util.Arrays; import java.util.Locale; @@ -86,19 +85,7 @@ class OpenSearch extends EnvironmentAwareCommand { @SuppressWarnings("removal") public static void main(final String[] args) throws Exception { overrideDnsCachePolicyProperties(); - /* - * We want the JVM to think there is a security manager installed so that if internal policy decisions that would be based on the - * presence of a security manager or lack thereof act as if there is a security manager present (e.g., DNS cache policy). This - * forces such policies to take effect immediately. 
- */ - System.setSecurityManager(new SecurityManager() { - - @Override - public void checkPermission(Permission perm) { - // grant all permissions so that we can later set the security manager to the one that we want - } - }); LogConfigurator.registerErrorListener(); final OpenSearch opensearch = new OpenSearch(); int status = main(args, opensearch, Terminal.DEFAULT); diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 9c93b0414bdda..ef7150dcc1178 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -41,9 +41,10 @@ import org.opensearch.common.transport.PortsRange; import org.opensearch.env.Environment; import org.opensearch.http.HttpTransportSettings; +import org.opensearch.javaagent.bootstrap.AgentPolicy; import org.opensearch.plugins.PluginInfo; import org.opensearch.plugins.PluginsService; -import org.opensearch.secure_sm.SecureSM; +import org.opensearch.secure_sm.policy.PolicyFile; import org.opensearch.transport.TcpTransport; import java.io.IOException; @@ -59,7 +60,6 @@ import java.security.NoSuchAlgorithmException; import java.security.Permissions; import java.security.Policy; -import java.security.URIParameter; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -144,23 +144,26 @@ static void configure(Environment environment, boolean filterBadDefaults) throws // enable security policy: union of template and environment-based paths, and possibly plugin permissions Map codebases = getCodebaseJarMap(JarHell.parseClassPath()); - Policy.setPolicy( + + // enable security manager + final String[] classesThatCanExit = new String[] { + // SecureSM matches class names as regular expressions so we escape the $ that arises from the nested class name + OpenSearchUncaughtExceptionHandler.PrivilegedHaltAction.class.getName().replace("$", "\\$"), + Command.class.getName() }; + + AgentPolicy.setPolicy( new OpenSearchPolicy( codebases, createPermissions(environment), getPluginPermissions(environment), filterBadDefaults, createRecursiveDataPathPermission(environment) - ) + ), + Set.of() /* trusted hosts */, + Set.of() /* trusted file systems */, + new AgentPolicy.AnyCanExit(classesThatCanExit) ); - // enable security manager - final String[] classesThatCanExit = new String[] { - // SecureSM matches class names as regular expressions so we escape the $ that arises from the nested class name - OpenSearchUncaughtExceptionHandler.PrivilegedHaltAction.class.getName().replace("$", "\\$"), - Command.class.getName() }; - System.setSecurityManager(new SecureSM(classesThatCanExit)); - // do some basic tests selfTest(); } @@ -280,14 +283,14 @@ static Policy readPolicy(URL policyFile, Map codebases) { addCodebaseToSystemProperties(propertiesSet, url, property, aliasProperty); } - return Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toURI())); + return new PolicyFile(policyFile); } finally { // clear codebase properties for (String property : propertiesSet) { System.clearProperty(property); } } - } catch (NoSuchAlgorithmException | URISyntaxException e) { + } catch (final RuntimeException e) { throw new IllegalArgumentException("unable to parse policy file `" + policyFile + "`", e); } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 
8c15706adceeb..d680fc04789f8 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -44,14 +44,12 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.http.HttpTransportSettings; -import org.opensearch.secure_sm.ThreadContextPermission; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskThreadContextStatePropagator; import org.opensearch.transport.client.OriginSettingClient; import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.security.Permission; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -114,11 +112,6 @@ public final class ThreadContext implements Writeable { public static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin"; // thread context permissions - - private static final Permission ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("markAsSystemContext"); - private static final Permission STASH_AND_MERGE_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("stashAndMergeHeaders"); - private static final Permission STASH_WITH_ORIGIN_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("stashWithOrigin"); - private static final Logger logger = LogManager.getLogger(ThreadContext.class); private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(); private final Map defaultHeader; @@ -223,10 +216,6 @@ public Writeable captureAsWriteable() { */ @SuppressWarnings("removal") public StoredContext stashWithOrigin(String origin) { - SecurityManager sm = System.getSecurityManager(); - if (sm != null) { - sm.checkPermission(STASH_WITH_ORIGIN_THREAD_CONTEXT_PERMISSION); - } final ThreadContext.StoredContext storedContext = stashContext(); putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin); return storedContext; @@ -246,10 +235,6 @@ public StoredContext stashWithOrigin(String origin) { */ @SuppressWarnings("removal") public StoredContext stashAndMergeHeaders(Map headers) { - SecurityManager sm = System.getSecurityManager(); - if (sm != null) { - sm.checkPermission(STASH_AND_MERGE_THREAD_CONTEXT_PERMISSION); - } final ThreadContextStruct context = threadLocal.get(); Map newHeader = new HashMap<>(headers); newHeader.putAll(context.requestHeaders); @@ -605,10 +590,6 @@ boolean isDefaultContext() { */ @SuppressWarnings("removal") public void markAsSystemContext() { - SecurityManager sm = System.getSecurityManager(); - if (sm != null) { - sm.checkPermission(ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION); - } threadLocal.set(threadLocal.get().setSystemContext(propagators)); } diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index f521ce0011540..fbe0afb3c2a95 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -93,6 +93,30 @@ grant codeBase "${codebase.reactor-core}" { permission java.net.SocketPermission "*", "connect,resolve"; }; +grant codeBase "${codebase.opensearch-cli}" { + permission java.net.SocketPermission "*", "connect,resolve"; +}; + +grant codeBase "${codebase.opensearch-core}" { + permission java.net.SocketPermission "*", "connect,resolve"; +}; + +grant codeBase "${codebase.jackson-core}" { + permission java.net.SocketPermission "*", 
"connect,resolve"; +}; + +grant codeBase "${codebase.opensearch-common}" { + permission java.net.SocketPermission "*", "connect,resolve"; +}; + +grant codeBase "${codebase.opensearch-x-content}" { + permission java.net.SocketPermission "*", "connect,resolve"; +}; + +grant codeBase "${codebase.opensearch}" { + permission java.net.SocketPermission "*", "connect,resolve"; +}; + //// Everything else: grant { diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index 78f302e9b23db..5fe1a5b64e6c7 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -101,9 +101,14 @@ grant codeBase "${codebase.junit}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; +grant codeBase "${codebase.opensearch-core}" { + // opensearch-nio makes and accepts socket connections + permission java.net.SocketPermission "*", "accept,resolve,connect"; +}; + grant codeBase "${codebase.opensearch-nio}" { // opensearch-nio makes and accepts socket connections - permission java.net.SocketPermission "*", "accept,connect"; + permission java.net.SocketPermission "*", "accept,resolve,connect"; }; grant codeBase "${codebase.opensearch-rest-client}" { @@ -111,16 +116,19 @@ grant codeBase "${codebase.opensearch-rest-client}" { permission java.net.SocketPermission "*", "connect"; // rest client uses system properties which gets the default proxy permission java.net.NetPermission "getProxySelector"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase "${codebase.httpcore5}" { // httpcore makes socket connections for rest tests permission java.net.SocketPermission "*", "connect"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase "${codebase.httpclient5}" { // httpclient5 makes socket connections for rest tests permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase "${codebase.httpcore-nio}" { diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 9773a0dcd16a0..59d20655151c1 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -132,6 +132,7 @@ import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; +import java.net.URI; import java.net.URISyntaxException; import java.nio.file.AccessDeniedException; import java.nio.file.AtomicMoveNotSupportedException; @@ -167,7 +168,7 @@ public void testExceptionRegistration() throws ClassNotFoundException, IOExcepti final Set> notRegistered = new HashSet<>(); final Set> hasDedicatedWrite = new HashSet<>(); final Set> registered = new HashSet<>(); - final String path = "/org/opensearch"; + final String path = "org/opensearch"; final Path coreLibStartPath = PathUtils.get(OpenSearchException.class.getProtectionDomain().getCodeSource().getLocation().toURI()); final Path startPath = PathUtils.get(OpenSearchServerException.class.getProtectionDomain().getCodeSource().getLocation().toURI()) .resolve("org") @@ -255,7 +256,8 @@ public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOEx Files.walkFileTree(coreLibStartPath, visitor); // walk the server module start 
path Files.walkFileTree(startPath, visitor); - final Path testStartPath = PathUtils.get(ExceptionSerializationTests.class.getResource(path).toURI()); + final URI location = ExceptionSerializationTests.class.getProtectionDomain().getCodeSource().getLocation().toURI(); + final Path testStartPath = PathUtils.get(location).resolve(path); Files.walkFileTree(testStartPath, visitor); assertTrue(notRegistered.remove(TestException.class)); assertTrue(notRegistered.remove(UnknownHeaderException.class)); diff --git a/server/src/test/resources/org/opensearch/bootstrap/test.policy b/server/src/test/resources/org/opensearch/bootstrap/test.policy index c2b5a8e9c0a4e..30396afaf2ca4 100644 --- a/server/src/test/resources/org/opensearch/bootstrap/test.policy +++ b/server/src/test/resources/org/opensearch/bootstrap/test.policy @@ -10,4 +10,10 @@ grant { // allow to test Security policy and codebases permission java.util.PropertyPermission "*", "read,write"; permission java.security.SecurityPermission "createPolicy.JavaPolicy"; + permission java.net.NetPermission "accessUnixDomainSocket"; +}; + +grant codeBase "${codebase.framework}" { + permission java.net.NetPermission "accessUnixDomainSocket"; + permission java.net.SocketPermission "*", "accept,connect"; }; diff --git a/test/framework/build.gradle b/test/framework/build.gradle index e5297ca0807a4..f5ccd05294d7b 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -53,6 +53,9 @@ dependencies { api "org.bouncycastle:bcpkix-fips:${versions.bouncycastle_pkix}" api "org.bouncycastle:bcutil-fips:${versions.bouncycastle_util}" + compileOnly project(":libs:agent-sm:bootstrap") + compileOnly "com.github.spotbugs:spotbugs-annotations:4.9.0" + annotationProcessor "org.apache.logging.log4j:log4j-core:${versions.log4j}" } @@ -97,9 +100,8 @@ test { systemProperty 'tests.gradle_wire_compat_versions', BuildParams.bwcVersions.wireCompatible.join(',') systemProperty 'tests.gradle_unreleased_versions', BuildParams.bwcVersions.unreleased.join(',') - if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_18) { - jvmArgs += ["-Djava.security.manager=allow"] - } + dependsOn(project(':libs:agent-sm:agent').prepareAgent) + jvmArgs += ["-javaagent:" + project(':libs:agent-sm:agent').jar.archiveFile.get()] } tasks.register("integTest", Test) { diff --git a/test/framework/src/main/java/org/opensearch/bootstrap/AgentAttach.java b/test/framework/src/main/java/org/opensearch/bootstrap/AgentAttach.java new file mode 100644 index 0000000000000..0a0df6756f21f --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/bootstrap/AgentAttach.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.bootstrap; + +public final class AgentAttach { + public static boolean agentIsAttached() { + try { + Class.forName("org.opensearch.javaagent.Agent", false, ClassLoader.getSystemClassLoader()); + return true; + } catch (ClassNotFoundException e) { + return false; + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java index 76c7ce0628aac..e67b5539bb690 100644 --- a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java @@ -46,9 +46,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.core.util.FileSystemUtils; -import org.opensearch.mockito.plugin.PriviledgedMockMaker; +import org.opensearch.javaagent.bootstrap.AgentPolicy; import org.opensearch.plugins.PluginInfo; -import org.opensearch.secure_sm.SecureSM; import org.junit.Assert; import java.io.InputStream; @@ -87,7 +86,19 @@ */ @SuppressWarnings("removal") public class BootstrapForTesting { - + private static final String[] TEST_RUNNER_PACKAGES = new String[] { + // gradle worker + "worker\\.org\\.gradle\\.process\\.internal\\.worker\\.GradleWorkerMain*", + // surefire test runner + "org\\.apache\\.maven\\.surefire\\.booter\\..*", + // junit4 test runner + "com\\.carrotsearch\\.ant\\.tasks\\.junit4\\.slave\\..*", + // eclipse test runner + "org\\.eclipse.jdt\\.internal\\.junit\\.runner\\..*", + // intellij test runner (before IDEA version 2019.3) + "com\\.intellij\\.rt\\.execution\\.junit\\..*", + // intellij test runner (since IDEA version 2019.3) + "com\\.intellij\\.rt\\.junit\\..*" }; // TODO: can we share more code with the non-test side here // without making things complex??? @@ -168,7 +179,7 @@ public class BootstrapForTesting { final Optional testPolicy = Optional.ofNullable(Bootstrap.class.getResource("test.policy")) .map(policy -> Security.readPolicy(policy, codebases)); final Policy opensearchPolicy = new OpenSearchPolicy(codebases, perms, getPluginPermissions(), true, new Permissions()); - Policy.setPolicy(new Policy() { + AgentPolicy.setPolicy(new Policy() { @Override public boolean implies(ProtectionDomain domain, Permission permission) { // implements union @@ -176,10 +187,15 @@ public boolean implies(ProtectionDomain domain, Permission permission) { || testFramework.implies(domain, permission) || testPolicy.map(policy -> policy.implies(domain, permission)).orElse(false /* no policy */); } - }); - // Create access control context for mocking - PriviledgedMockMaker.createAccessControlContext(); - System.setSecurityManager(SecureSM.createTestSecureSM(getTrustedHosts())); + }, + getTrustedHosts(), + Set.of("jimfs"), /* mock file system */ + new AgentPolicy.AnyCanExit(TEST_RUNNER_PACKAGES) + ); + + if (!AgentAttach.agentIsAttached()) { + throw new RuntimeException("the security agent is not attached"); + } Security.selfTest(); // guarantee plugin classes are initialized first, in case they have one-time hacks. 
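For readers skimming the series, here is a minimal stand-alone sketch (not part of the change set) of the detection idea the new AgentAttach helper relies on: probe for the agent's entry class on the system class loader and fail fast when the JVM was started without the -javaagent flag that test/framework/build.gradle now passes. The class name AgentPreflight and its main method are illustrative assumptions; only the probed class org.opensearch.javaagent.Agent comes from the diff above.

public final class AgentPreflight {
    // Present only when the instrumentation agent was loaded with the JVM (name taken from the diff above).
    private static final String AGENT_CLASS = "org.opensearch.javaagent.Agent";

    private AgentPreflight() {}

    // True when the agent's entry class is visible to the system class loader.
    static boolean agentIsAttached() {
        try {
            // Pass false so the class is not initialized; we only care whether it was loaded with the JVM.
            Class.forName(AGENT_CLASS, false, ClassLoader.getSystemClassLoader());
            return true;
        } catch (ClassNotFoundException e) {
            return false;
        }
    }

    public static void main(String[] args) {
        if (!agentIsAttached()) {
            // Same failure mode as BootstrapForTesting above: refuse to run without the agent.
            throw new IllegalStateException("the security agent is not attached; start the JVM with -javaagent:<agent jar>");
        }
        System.out.println("security agent detected");
    }
}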
diff --git a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java deleted file mode 100644 index cc2d26a598757..0000000000000 --- a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.mockito.plugin; - -import org.opensearch.common.SuppressForbidden; - -import java.security.AccessControlContext; -import java.security.AccessController; -import java.security.DomainCombiner; -import java.security.PrivilegedAction; -import java.security.ProtectionDomain; -import java.util.Arrays; -import java.util.Optional; -import java.util.function.Function; - -import org.mockito.Incubating; -import org.mockito.MockedConstruction; -import org.mockito.internal.creation.bytebuddy.ByteBuddyMockMaker; -import org.mockito.internal.util.reflection.LenientCopyTool; -import org.mockito.invocation.MockHandler; -import org.mockito.mock.MockCreationSettings; -import org.mockito.plugins.MockMaker; - -/** - * Mockito plugin which wraps the Mockito calls into priviledged execution blocks and respects - * SecurityManager presence. - */ -@SuppressWarnings("removal") -@SuppressForbidden(reason = "allow URL#getFile() to be used in tests") -public class PriviledgedMockMaker implements MockMaker { - private static AccessControlContext context; - private final ByteBuddyMockMaker delegate; - - /** - * Create dedicated AccessControlContext to use the Mockito protection domain (test only) - * so to relax the security constraints for the test cases which rely on mocks. This plugin - * wraps the mock/spy creation into priviledged action using the custom access control context - * since Mockito does not support SecurityManager out of the box. The method has to be called by - * test framework before the SecurityManager is being set, otherwise additional permissions have - * to be granted to the caller: - *
- * permission java.security.Permission "createAccessControlContext" - * - */ - public static void createAccessControlContext() { - // This combiner, if bound to an access control context, will unconditionally - // substitute the call chain protection domains with the 'mockito-core' one if it - // is present. The security checks are relaxed intentionally to trust mocking - // implementation if it is part of the call chain. - final DomainCombiner combiner = (current, assigned) -> Arrays.stream(current) - .filter(pd -> pd.getCodeSource().getLocation().getFile().contains("mockito-core") /* check mockito-core only */) - .findAny() - .map(pd -> new ProtectionDomain[] { pd }) - .orElse(current); - - // Bind combiner to an access control context (the combiner stateless and shareable) - final AccessControlContext wrapper = new AccessControlContext(AccessController.getContext(), combiner); - - // Create new access control context with dedicated combiner - context = AccessController.doPrivileged((PrivilegedAction) AccessController::getContext, wrapper); - } - - /** - * Construct an instance of the priviledged mock maker using ByteBuddyMockMaker under the hood. - */ - public PriviledgedMockMaker() { - delegate = AccessController.doPrivileged((PrivilegedAction) () -> new ByteBuddyMockMaker(), context); - } - - @SuppressWarnings("rawtypes") - @Override - public T createMock(MockCreationSettings settings, MockHandler handler) { - return AccessController.doPrivileged((PrivilegedAction) () -> delegate.createMock(settings, handler), context); - } - - @SuppressWarnings("rawtypes") - @Override - public Optional createSpy(MockCreationSettings settings, MockHandler handler, T object) { - // The ByteBuddyMockMaker does not implement createSpy and relies on Mockito's fallback - return AccessController.doPrivileged((PrivilegedAction>) () -> { - T instance = delegate.createMock(settings, handler); - new LenientCopyTool().copyToMock(object, instance); - return Optional.of(instance); - }, context); - } - - @SuppressWarnings("rawtypes") - @Override - public MockHandler getHandler(Object mock) { - return delegate.getHandler(mock); - } - - @SuppressWarnings("rawtypes") - @Override - public void resetMock(Object mock, MockHandler newHandler, MockCreationSettings settings) { - AccessController.doPrivileged((PrivilegedAction) () -> { - delegate.resetMock(mock, newHandler, settings); - return null; - }, context); - } - - @Override - @Incubating - public TypeMockability isTypeMockable(Class type) { - return delegate.isTypeMockable(type); - } - - @SuppressWarnings("rawtypes") - @Override - public StaticMockControl createStaticMock(Class type, MockCreationSettings settings, MockHandler handler) { - return delegate.createStaticMock(type, settings, handler); - } - - @Override - public ConstructionMockControl createConstructionMock( - Class type, - Function> settingsFactory, - Function> handlerFactory, - MockedConstruction.MockInitializer mockInitializer - ) { - return delegate.createConstructionMock(type, settingsFactory, handlerFactory, mockInitializer); - } - - @Override - public void clearAllCaches() { - delegate.clearAllCaches(); - } -} diff --git a/test/framework/src/main/resources/mockito-extensions/org.mockito.plugins.MockMaker b/test/framework/src/main/resources/mockito-extensions/org.mockito.plugins.MockMaker index 99b0d419fc445..70e0524ee208c 100644 --- a/test/framework/src/main/resources/mockito-extensions/org.mockito.plugins.MockMaker +++ 
b/test/framework/src/main/resources/mockito-extensions/org.mockito.plugins.MockMaker @@ -1 +1 @@ -org.opensearch.mockito.plugin.PriviledgedMockMaker +org.mockito.internal.creation.bytebuddy.ByteBuddyMockMaker diff --git a/test/framework/src/test/resources/org/opensearch/bootstrap/test.policy b/test/framework/src/test/resources/org/opensearch/bootstrap/test.policy new file mode 100644 index 0000000000000..07c9fe160e985 --- /dev/null +++ b/test/framework/src/test/resources/org/opensearch/bootstrap/test.policy @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +grant codeBase "${codebase.opensearch-nio}" { + permission java.net.NetPermission "accessUnixDomainSocket"; +}; + +grant { + permission java.net.NetPermission "accessUnixDomainSocket"; + permission java.net.SocketPermission "*", "accept,connect"; +}; From 58eb44e7ece913aca6de34d32f6b837a512541ae Mon Sep 17 00:00:00 2001 From: Sagar <99425694+sgup432@users.noreply.github.com> Date: Wed, 9 Apr 2025 11:21:28 -0700 Subject: [PATCH 177/550] [Tiered Cache] Using a single cache manager for all ehcache disk caches (#17513) * Using a single cache manager for all ehcache disk caches Signed-off-by: Sagar Upadhyaya * Added changelog Signed-off-by: Sagar Upadhyaya * Fixing cache manager UT Signed-off-by: Sagar Upadhyaya * Addressing comments Signed-off-by: Sagar Upadhyaya * Removing commented out code Signed-off-by: Sagar Upadhyaya * Adding changelog Signed-off-by: Sagar Upadhyaya * Changes to perform mutable changes for cache manager under a lock Signed-off-by: Sagar Upadhyaya * Changes to fix UT Signed-off-by: Sagar Upadhyaya * Addressing minor comments Signed-off-by: Sagar Upadhyaya --------- Signed-off-by: Sagar Upadhyaya Signed-off-by: Sagar <99425694+sgup432@users.noreply.github.com> --- CHANGELOG.md | 1 + .../common/tier/TieredSpilloverCache.java | 2 +- .../cache/EhcacheDiskCacheSettings.java | 16 +- .../cache/store/disk/EhcacheDiskCache.java | 186 ++++++------- .../store/disk/EhcacheDiskCacheManager.java | 253 ++++++++++++++++++ .../store/disk/EhCacheDiskCacheTests.java | 85 +++--- .../disk/EhcacheDiskCacheManagerTests.java | 186 +++++++++++++ 7 files changed, 579 insertions(+), 150 deletions(-) create mode 100644 plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCacheManager.java create mode 100644 plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhcacheDiskCacheManagerTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index fb9858ff83534..455184632ea35 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Security Manager Replacement] Phase off SecurityManager usage in favor of Java Agent ([#17861](https://github.com/opensearch-project/OpenSearch/pull/17861)) - Support AutoExpand for SearchReplica ([#17741](https://github.com/opensearch-project/OpenSearch/pull/17741)) - Implement fixed interval refresh task scheduling ([#17777](https://github.com/opensearch-project/OpenSearch/pull/17777)) +- [Tiered caching] Create a single cache manager for all the disk caches. 
([#17513](https://github.com/opensearch-project/OpenSearch/pull/17513)) - Add GRPC DocumentService and Bulk endpoint ([#17727](https://github.com/opensearch-project/OpenSearch/pull/17727)) - Added scale to zero (`search_only` mode) support for OpenSearch reader writer separation ([#17299](https://github.com/opensearch-project/OpenSearch/pull/17299) - [Star Tree] [Search] Resolving numeric range aggregation with metric aggregation using star-tree ([#17273](https://github.com/opensearch-project/OpenSearch/pull/17273)) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index d968e61cffcff..876b07446db27 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -213,7 +213,7 @@ static class TieredSpilloverCacheSegment implements ICache { .setSegmentCount(1) // We don't need to make underlying caches multi-segmented .setStatsTrackingEnabled(false) .setMaxSizeInBytes(diskCacheSizeInBytes) - .setStoragePath(builder.cacheConfig.getStoragePath() + "/" + segmentNumber) + .setStoragePath(builder.cacheConfig.getStoragePath()) .setCacheAlias("tiered_disk_cache#" + segmentNumber) .build(), builder.cacheType, diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java index e4c9dd1e96c3c..af3d634c6caad 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java @@ -12,6 +12,7 @@ import org.opensearch.common.cache.CacheType; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; import java.util.HashMap; import java.util.Map; @@ -36,17 +37,26 @@ public class EhcacheDiskCacheSettings { public static final Setting.AffixSetting DISK_WRITE_MINIMUM_THREADS_SETTING = Setting.suffixKeySetting( EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".min_threads", - (key) -> Setting.intSetting(key, 2, 1, 5, NodeScope) + (key) -> Setting.intSetting(key, 2, 1, ThreadPool.searchThreadPoolSize(Runtime.getRuntime().availableProcessors()), NodeScope) ); /** - * Ehcache disk write maximum threads for its pool + * Ehcache disk write maximum threads for its pool. The default value is 1.5 * CPU_CORES ie equal to number of + * search threads. Disk operations are typically I/O bound rather than CPU bound, so setting it greater than the + * number of cpu cores should ideally be fine. * * Setting pattern: {cache_type}.ehcache_disk.max_threads */ public static final Setting.AffixSetting DISK_WRITE_MAXIMUM_THREADS_SETTING = Setting.suffixKeySetting( EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_threads", - (key) -> Setting.intSetting(key, 2, 1, 20, NodeScope) + (key) -> Setting.intSetting( + key, + ThreadPool.searchThreadPoolSize(Runtime.getRuntime().availableProcessors()), + 1, + Runtime.getRuntime().availableProcessors() * 10, // The max one can configure this in setting is 10 times + // CPU cores. Ideally won't be required, but in case one way use it. 
+ NodeScope + ) ); /** diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index 96a323c00d8b4..ef5337f568452 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.cache.EhcacheDiskCacheSettings; import org.opensearch.common.SuppressForbidden; @@ -153,16 +152,19 @@ public class EhcacheDiskCache implements ICache { if (this.storagePath == null || this.storagePath.isBlank()) { throw new IllegalArgumentException("Storage path shouldn't be null or empty"); } - // Delete all the previous disk cache related files/data. We don't persist data between process restart for - // now which is why need to do this. Clean up in case there was a non graceful restart and we had older disk - // cache data still lying around. - Path ehcacheDirectory = Paths.get(this.storagePath); - if (Files.exists(ehcacheDirectory)) { - try { - logger.info("Found older disk cache data lying around during initialization under path: {}", this.storagePath); - IOUtils.rm(ehcacheDirectory); - } catch (IOException e) { - throw new OpenSearchException(String.format(CACHE_DATA_CLEANUP_DURING_INITIALIZATION_EXCEPTION, this.storagePath), e); + // Delete all the previous disk cache related files/data only if cache manager doesn't exist. As we can + // create multiple caches via single cache manager for a cache type. We don't persist data between process + // restart for now which is why need to do this. Clean up in case there was a non graceful restart and we had + // older disk cache data still lying around. 
+ if (!EhcacheDiskCacheManager.doesCacheManagerExist(cacheType)) { + Path ehcacheDirectory = Paths.get(this.storagePath); + if (Files.exists(ehcacheDirectory)) { + try { + logger.info("Found older disk cache data lying around during initialization under path: {}", this.storagePath); + IOUtils.rm(ehcacheDirectory); + } catch (IOException e) { + throw new OpenSearchException(String.format(CACHE_DATA_CLEANUP_DURING_INITIALIZATION_EXCEPTION, this.storagePath), e); + } } } if (builder.threadPoolAlias == null || builder.threadPoolAlias.isBlank()) { @@ -173,7 +175,7 @@ public class EhcacheDiskCache implements ICache { this.settings = Objects.requireNonNull(builder.getSettings(), "Settings objects shouldn't be null"); this.keySerializer = Objects.requireNonNull(builder.keySerializer, "Key serializer shouldn't be null"); this.valueSerializer = Objects.requireNonNull(builder.valueSerializer, "Value serializer shouldn't be null"); - this.cacheManager = buildCacheManager(); + this.cacheManager = EhcacheDiskCacheManager.getCacheManager(cacheType, this.storagePath, settings, this.threadPoolAlias); Objects.requireNonNull(builder.getRemovalListener(), "Removal listener can't be null"); this.removalListener = builder.getRemovalListener(); Objects.requireNonNull(builder.getWeigher(), "Weigher can't be null"); @@ -189,73 +191,54 @@ public class EhcacheDiskCache implements ICache { } } - // Package private for testing - PersistentCacheManager getCacheManager() { - return this.cacheManager; - } - @SuppressWarnings({ "rawtypes", "removal" }) private Cache buildCache(Duration expireAfterAccess, Builder builder) { // Creating the cache requires permissions specified in plugin-security.policy - return AccessController.doPrivileged((PrivilegedAction>) () -> { - try { - int segmentCount = (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) - .get(DISK_SEGMENT_KEY) - .get(settings); - if (builder.getNumberOfSegments() > 0) { - segmentCount = builder.getNumberOfSegments(); + int segmentCount = (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType).get(DISK_SEGMENT_KEY).get(settings); + if (builder.getNumberOfSegments() > 0) { + segmentCount = builder.getNumberOfSegments(); + } + CacheConfigurationBuilder cacheConfigurationBuilder = CacheConfigurationBuilder + .newCacheConfigurationBuilder( + ICacheKey.class, + ByteArrayWrapper.class, + ResourcePoolsBuilder.newResourcePoolsBuilder().disk(maxWeightInBytes, MemoryUnit.B) + ).withExpiry(new ExpiryPolicy<>() { + @Override + public Duration getExpiryForCreation(ICacheKey key, ByteArrayWrapper value) { + return INFINITE; } - return this.cacheManager.createCache( - this.diskCacheAlias, - CacheConfigurationBuilder.newCacheConfigurationBuilder( - ICacheKey.class, - ByteArrayWrapper.class, - ResourcePoolsBuilder.newResourcePoolsBuilder().disk(maxWeightInBytes, MemoryUnit.B) - ).withExpiry(new ExpiryPolicy<>() { - @Override - public Duration getExpiryForCreation(ICacheKey key, ByteArrayWrapper value) { - return INFINITE; - } - - @Override - public Duration getExpiryForAccess(ICacheKey key, Supplier value) { - return expireAfterAccess; - } - - @Override - public Duration getExpiryForUpdate( - ICacheKey key, - Supplier oldValue, - ByteArrayWrapper newValue - ) { - return INFINITE; - } - }) - .withService(getListenerConfiguration(builder)) - .withService( - new OffHeapDiskStoreConfiguration( - this.threadPoolAlias, - (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) - .get(DISK_WRITE_CONCURRENCY_KEY) - .get(settings), - 
segmentCount - ) - ) - .withKeySerializer(new KeySerializerWrapper(keySerializer)) - .withValueSerializer(new ByteArrayWrapperSerializer()) - // We pass ByteArrayWrapperSerializer as ehcache's value serializer. If V is an interface, and we pass its - // serializer directly to ehcache, ehcache requires the classes match exactly before/after serialization. - // This is not always feasible or necessary, like for BytesReference. So, we handle the value serialization - // before V hits ehcache. - ); - } catch (IllegalArgumentException ex) { - logger.error("Ehcache disk cache initialization failed due to illegal argument: {}", ex.getMessage()); - throw ex; - } catch (IllegalStateException ex) { - logger.error("Ehcache disk cache initialization failed: {}", ex.getMessage()); - throw ex; - } - }); + + @Override + public Duration getExpiryForAccess(ICacheKey key, Supplier value) { + return expireAfterAccess; + } + + @Override + public Duration getExpiryForUpdate( + ICacheKey key, + Supplier oldValue, + ByteArrayWrapper newValue + ) { + return INFINITE; + } + }) + .withService(getListenerConfiguration(builder)) + .withService( + new OffHeapDiskStoreConfiguration( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType).get(DISK_WRITE_CONCURRENCY_KEY).get(settings), + segmentCount + ) + ) + .withKeySerializer(new KeySerializerWrapper(keySerializer)) + .withValueSerializer(new ByteArrayWrapperSerializer()); // We pass ByteArrayWrapperSerializer as ehcache's value serializer. If + // V is an interface, and we pass its + // serializer directly to ehcache, ehcache requires the classes match exactly before/after serialization. + // This is not always feasible or necessary, like for BytesReference. So, we handle the value serialization + // before V hits ehcache. + + return EhcacheDiskCacheManager.createCache(cacheType, this.diskCacheAlias, cacheConfigurationBuilder); } private CacheEventListenerConfigurationBuilder getListenerConfiguration(Builder builder) { @@ -470,21 +453,7 @@ public void refresh() { @Override @SuppressForbidden(reason = "Ehcache uses File.io") public void close() { - try { - cacheManager.close(); - } catch (Exception e) { - logger.error(() -> new ParameterizedMessage("Exception occurred while trying to close ehcache manager"), e); - } - // Delete all the disk cache related files/data in case it is present - Path ehcacheDirectory = Paths.get(this.storagePath); - if (Files.exists(ehcacheDirectory)) { - try { - IOUtils.rm(ehcacheDirectory); - } catch (IOException e) { - logger.error(() -> new ParameterizedMessage("Failed to delete ehcache disk cache data under path: {}", this.storagePath)); - } - } - + EhcacheDiskCacheManager.closeCache(cacheType, diskCacheAlias, storagePath); } /** @@ -597,16 +566,24 @@ public void onEvent(CacheEvent, ? extends ByteArrayWrappe * Wrapper over ICacheKeySerializer which is compatible with ehcache's serializer requirements. 
*/ @SuppressWarnings({ "rawtypes", "unchecked" }) - private class KeySerializerWrapper implements org.ehcache.spi.serialization.Serializer { + public class KeySerializerWrapper implements org.ehcache.spi.serialization.Serializer { private ICacheKeySerializer serializer; + /** + * Constructor for key serializer + * @param internalKeySerializer serializer for internal key + */ public KeySerializerWrapper(Serializer internalKeySerializer) { this.serializer = new ICacheKeySerializer<>(internalKeySerializer); } - // This constructor must be present, but does not have to work as we are not actually persisting the disk - // cache after a restart. - // See https://www.ehcache.org/documentation/3.0/serializers-copiers.html#persistent-vs-transient-caches + /** + * This constructor must be present, but does not have to work as we are not actually persisting the disk + * cache after a restart. See https://www.ehcache.org/documentation/3.0/serializers-copiers + * .html#persistent-vs-transient-caches + * @param classLoader + * @param persistenceContext + */ public KeySerializerWrapper(ClassLoader classLoader, FileBasedPersistenceContext persistenceContext) {} @Override @@ -632,12 +609,19 @@ public boolean equals(ICacheKey object, ByteBuffer binary) throws ClassNotFoundE /** * Wrapper allowing Ehcache to serialize ByteArrayWrapper. */ - private static class ByteArrayWrapperSerializer implements org.ehcache.spi.serialization.Serializer { + public static class ByteArrayWrapperSerializer implements org.ehcache.spi.serialization.Serializer { + /** + * Default constructor + */ public ByteArrayWrapperSerializer() {} - // This constructor must be present, but does not have to work as we are not actually persisting the disk - // cache after a restart. - // See https://www.ehcache.org/documentation/3.0/serializers-copiers.html#persistent-vs-transient-caches + /** + * This constructor must be present, but does not have to work as we are not actually persisting the disk + * cache after a restart. See https://www.ehcache.org/documentation/3.0/serializers-copiers + * .html#persistent-vs-transient-caches + * @param classLoader + * @param persistenceContext + */ public ByteArrayWrapperSerializer(ClassLoader classLoader, FileBasedPersistenceContext persistenceContext) {} @Override @@ -906,9 +890,13 @@ public EhcacheDiskCache build() { * A wrapper over byte[], with equals() that works using Arrays.equals(). * Necessary due to a limitation in how Ehcache compares byte[]. */ - static class ByteArrayWrapper { + public static class ByteArrayWrapper { private final byte[] value; + /** + * Constructor for byte array wrapper. + * @param value value to wrap. + */ public ByteArrayWrapper(byte[] value) { this.value = value; } diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCacheManager.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCacheManager.java new file mode 100644 index 0000000000000..dc6d8cfd31b7c --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCacheManager.java @@ -0,0 +1,253 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache.store.disk; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.cache.EhcacheDiskCacheSettings; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import org.ehcache.Cache; +import org.ehcache.PersistentCacheManager; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.PooledExecutionServiceConfigurationBuilder; + +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_MAXIMUM_THREADS_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_MIN_THREADS_KEY; +import static org.opensearch.cache.store.disk.EhcacheDiskCache.THREAD_POOL_ALIAS_PREFIX; +import static org.opensearch.cache.store.disk.EhcacheDiskCache.UNIQUE_ID; + +/** + * This is responsible to create a single cache manager for a cache type, and is used to create subsequent caches if + * needed. + */ +public class EhcacheDiskCacheManager { + + // Defines one cache manager per cache type. + private static final Map> cacheManagerMap = new HashMap<>(); + private static final Logger logger = LogManager.getLogger(EhcacheDiskCacheManager.class); + /** + * This lock is used to synchronize the operation where we create/remove cache and increment/decrement the + * reference counters. + */ + private static final Lock lock = new ReentrantLock(); + private static final String CACHE_MANAGER_DOES_NOT_EXIST_EXCEPTION_MSG = "Ehcache manager does not exist for " + "cache type: "; + + // For testing + static Map> getCacheManagerMap() { + return cacheManagerMap; + } + + /** + * Private Constructor + */ + private EhcacheDiskCacheManager() {} + + /** + * Used to fetch cache manager for a cache type. If it doesn't exist, it creates one. + * @param cacheType cache type + * @param storagePath storage path for the cache + * @param settings settings + * @param threadPoolAlias alias for disk thread pool + * @return persistent cache manager + */ + public static PersistentCacheManager getCacheManager( + CacheType cacheType, + String storagePath, + Settings settings, + String threadPoolAlias + ) { + try { + lock.lock(); + return cacheManagerMap.computeIfAbsent( + cacheType, + type -> new Tuple<>(createCacheManager(cacheType, storagePath, settings, threadPoolAlias), new AtomicInteger(0)) + ).v1(); + } finally { + lock.unlock(); + } + } + + /** + * Checks whether cache manager exist for a cache type. + * @param cacheType cache type + * @return true/false + */ + public static boolean doesCacheManagerExist(CacheType cacheType) { + return cacheManagerMap.get(cacheType) != null; + } + + /** + * Used to create caches using a cache manager for a specific cache type. 
+ * @param cacheType cache type + * @param diskCacheAlias disk cache alias + * @param cacheConfigurationBuilder cache configuration + * @return disk cache + * @param key type + * @param value type + */ + @SuppressWarnings({ "removal" }) + public static Cache createCache( + CacheType cacheType, + String diskCacheAlias, + CacheConfigurationBuilder cacheConfigurationBuilder + ) { + if (cacheType == null || diskCacheAlias == null || cacheConfigurationBuilder == null) { + throw new IllegalArgumentException( + "One of the arguments passed to createCache is " + + "null. CacheType: " + + cacheType + + ", diskCacheAlias: " + + diskCacheAlias + + ", " + + "cacheConfigurationBuilder: " + + cacheConfigurationBuilder + ); + } + if (cacheManagerMap.get(cacheType) == null) { + throw new IllegalArgumentException(CACHE_MANAGER_DOES_NOT_EXIST_EXCEPTION_MSG + cacheType); + } + // Creating the cache requires permissions specified in plugin-security.policy + return AccessController.doPrivileged((PrivilegedAction>) () -> { + try { + lock.lock(); + // Check again for null cache manager, in case it got removed by another thread in below closeCache() + // method. + if (cacheManagerMap.get(cacheType) == null) { + logger.warn(CACHE_MANAGER_DOES_NOT_EXIST_EXCEPTION_MSG + cacheType); + throw new IllegalStateException(CACHE_MANAGER_DOES_NOT_EXIST_EXCEPTION_MSG + cacheType); + } + Cache cache = cacheManagerMap.get(cacheType).v1().createCache(diskCacheAlias, cacheConfigurationBuilder + // We pass ByteArrayWrapperSerializer as ehcache's value serializer. If V is an interface, and we pass its + // serializer directly to ehcache, ehcache requires the classes match exactly before/after serialization. + // This is not always feasible or necessary, like for BytesReference. So, we handle the value serialization + // before V hits ehcache. + ); + cacheManagerMap.get(cacheType).v2().incrementAndGet(); + return cache; + } catch (IllegalArgumentException ex) { + logger.error("Ehcache disk cache initialization failed due to illegal argument: {}", ex.getMessage()); + throw ex; + } catch (IllegalStateException ex) { + logger.error("Ehcache disk cache initialization failed: {}", ex.getMessage()); + throw ex; + } finally { + lock.unlock(); + } + }); + } + + /** + * Used to close cache for a specific cache type and alias. + * @param cacheType cache type + * @param diskCacheAlias disk cache alias + * @param storagePath storage path for cache + */ + @SuppressForbidden(reason = "Ehcache uses File.io") + public static void closeCache(CacheType cacheType, String diskCacheAlias, String storagePath) { + if (cacheManagerMap.get(cacheType) == null) { + logger.warn(() -> new ParameterizedMessage("Trying to close cache for: {} but cache manager does not " + "exist", cacheType)); + return; + } + PersistentCacheManager cacheManager = cacheManagerMap.get(cacheType).v1(); + try { + lock.lock(); + try { + cacheManager.removeCache(diskCacheAlias); + } catch (Exception ex) { + logger.error(() -> new ParameterizedMessage("Exception occurred while trying to close cache: " + diskCacheAlias), ex); + } + // Check again in case a different thread removed it. + if (cacheManagerMap.get(cacheType) == null) { + logger.warn( + () -> new ParameterizedMessage("Trying to close cache for: {} but cache manager does not " + "exist", cacheType) + ); + return; + } + int referenceCount = cacheManagerMap.get(cacheType).v2().decrementAndGet(); + // All caches have been closed associated with this cache manager, lets close this as well. 
+ if (referenceCount == 0) { + try { + logger.debug("Closing cache manager for cacheType: " + cacheType); + cacheManager.close(); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Exception occurred while trying to close ehcache manager"), e); + } + // Delete all the disk cache related files/data in case it is present + Path ehcacheDirectory = Paths.get(storagePath); + if (Files.exists(ehcacheDirectory)) { + try { + logger.debug( + "Removing disk cache related files for cacheType: " + cacheType + " under " + "directory: " + ehcacheDirectory + ); + IOUtils.rm(ehcacheDirectory); + } catch (IOException e) { + logger.error( + () -> new ParameterizedMessage("Failed to delete ehcache disk cache data under path: {}", storagePath) + ); + } + } + cacheManagerMap.remove(cacheType); + } + } finally { + lock.unlock(); + } + } + + @SuppressWarnings("removal") + @SuppressForbidden(reason = "Ehcache uses File.io") + private static PersistentCacheManager createCacheManager( + CacheType cacheType, + String storagePath, + Settings settings, + String threadPoolAlias + ) { + + return AccessController.doPrivileged( + (PrivilegedAction) () -> CacheManagerBuilder.newCacheManagerBuilder() + .with(CacheManagerBuilder.persistence(new File(storagePath))) + + .using( + PooledExecutionServiceConfigurationBuilder.newPooledExecutionServiceConfigurationBuilder() + .defaultPool(THREAD_POOL_ALIAS_PREFIX + "Default#" + UNIQUE_ID, 1, 3) // Default pool used for other tasks + // like event listeners + .pool( + threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MIN_THREADS_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MAXIMUM_THREADS_KEY) + .get(settings) + ) + .build() + ) + .build(true) + ); + } +} diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java index 2f58bb5df0ebe..3b3c5fd82f87f 100644 --- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -58,9 +58,6 @@ import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY; import static org.opensearch.cache.store.disk.EhcacheDiskCache.MINIMUM_MAX_SIZE_IN_BYTES; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @ThreadLeakFilters(filters = { EhcacheThreadLeakFilter.class }) @@ -74,8 +71,10 @@ public void testBasicGetAndPut() throws IOException { MockRemovalListener removalListener = new MockRemovalListener<>(); ToLongBiFunction, String> weigher = getWeigher(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") - .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setStoragePath( + env.nodePaths()[0].indicesPath.toString() + "/request_cache" + ) + .setDiskCacheAlias(generateRandomString(5)) .setIsEventListenerModeSync(true) .setKeyType(String.class) .setValueType(String.class) @@ -187,8 +186,7 @@ public void testConcurrentPut() throws Exception { Settings 
settings = Settings.builder().build(); MockRemovalListener removalListener = new MockRemovalListener<>(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias("test1") - .setThreadPoolAlias("ehcacheTest") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias(generateRandomString(5)) .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") .setIsEventListenerModeSync(true) // For accurate count .setKeyType(String.class) @@ -237,8 +235,8 @@ public void testEhcacheParallelGets() throws Exception { Settings settings = Settings.builder().build(); MockRemovalListener removalListener = new MockRemovalListener<>(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias("test1") - .setThreadPoolAlias("ehcacheTest") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias(generateRandomString(5)) + // .setThreadPoolAlias("ehcacheTest") .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") .setIsEventListenerModeSync(true) // For accurate count .setKeyType(String.class) @@ -285,8 +283,7 @@ public void testEhcacheParallelGets() throws Exception { public void testEhcacheKeyIterator() throws Exception { Settings settings = Settings.builder().build(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias("test1") - .setThreadPoolAlias("ehcacheTest") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias(generateRandomString(5)) .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") .setIsEventListenerModeSync(true) .setKeyType(String.class) @@ -327,10 +324,9 @@ public void testEvictions() throws Exception { MockRemovalListener removalListener = new MockRemovalListener<>(); ToLongBiFunction, String> weigher = getWeigher(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias("test1") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias(generateRandomString(5)) .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") .setIsEventListenerModeSync(true) - .setThreadPoolAlias("ehcacheTest") .setKeyType(String.class) .setValueType(String.class) .setKeySerializer(new StringSerializer()) @@ -365,7 +361,6 @@ public void testComputeIfAbsentConcurrently() throws Exception { ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias("test1") .setIsEventListenerModeSync(true) .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") - .setThreadPoolAlias("ehcacheTest") .setKeyType(String.class) .setValueType(String.class) .setKeySerializer(new StringSerializer()) @@ -451,6 +446,7 @@ public void testComputeIfAbsentConcurrentlyWithMultipleEhcacheDiskCache() throws .setDimensionNames(List.of(dimensionName)) .setWeigher(getWeigher()) .setMaxSizeInBytes(CACHE_SIZE_IN_BYTES * 100) + .setCacheAlias("ehcache_disk#" + i) .setSettings( Settings.builder() .put( @@ -463,7 +459,7 @@ public void testComputeIfAbsentConcurrentlyWithMultipleEhcacheDiskCache() throws EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) .get(DISK_STORAGE_PATH_KEY) .getKey(), - env.nodePaths()[0].indicesPath.toString() + "/request_cache/" + i + env.nodePaths()[0].indicesPath.toString() + "/request_cache/" ) .put( 
EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) @@ -506,10 +502,9 @@ public void testComputeIfAbsentConcurrentlyAndThrowsException() throws Exception Settings settings = Settings.builder().build(); MockRemovalListener removalListener = new MockRemovalListener<>(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias("test1") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias(generateRandomString(5)) .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") .setIsEventListenerModeSync(true) - .setThreadPoolAlias("ehcacheTest") .setKeyType(String.class) .setValueType(String.class) .setKeySerializer(new StringSerializer()) @@ -568,7 +563,6 @@ public void testComputeIfAbsentWithNullValueLoading() throws Exception { MockRemovalListener removalListener = new MockRemovalListener<>(); try (NodeEnvironment env = newNodeEnvironment(settings)) { ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias("test1") - .setThreadPoolAlias("ehcacheTest") .setIsEventListenerModeSync(true) .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") .setKeyType(String.class) @@ -641,8 +635,7 @@ public void testMemoryTracking() throws Exception { ); int maxEntries = 2000; try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias("test1") - .setThreadPoolAlias("ehcacheTest") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias(generateRandomString(5)) .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") .setKeyType(String.class) .setValueType(String.class) @@ -707,8 +700,7 @@ public void testMemoryTracking() throws Exception { public void testEhcacheKeyIteratorWithRemove() throws IOException { Settings settings = Settings.builder().build(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias("test1") - .setThreadPoolAlias("ehcacheTest") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setDiskCacheAlias(generateRandomString(5)) .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") .setIsEventListenerModeSync(true) .setKeySerializer(new StringSerializer()) @@ -755,8 +747,10 @@ public void testInvalidateAll() throws Exception { Settings settings = Settings.builder().build(); MockRemovalListener removalListener = new MockRemovalListener<>(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") - .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setStoragePath( + env.nodePaths()[0].indicesPath.toString() + "/request_cache" + ) + .setDiskCacheAlias(generateRandomString(5)) .setIsEventListenerModeSync(true) .setKeyType(String.class) .setValueType(String.class) @@ -792,7 +786,7 @@ public void testBasicGetAndPutBytesReference() throws Exception { Settings settings = Settings.builder().build(); try (NodeEnvironment env = newNodeEnvironment(settings)) { ICache ehCacheDiskCachingTier = new EhcacheDiskCache.Builder() - .setThreadPoolAlias("ehcacheTest") + .setDiskCacheAlias(generateRandomString(5)) .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") .setKeySerializer(new StringSerializer()) .setValueSerializer(new 
BytesReferenceSerializer()) @@ -838,10 +832,12 @@ public void testInvalidate() throws Exception { Settings settings = Settings.builder().build(); MockRemovalListener removalListener = new MockRemovalListener<>(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") - .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setStoragePath( + env.nodePaths()[0].indicesPath.toString() + "/request_cache" + ) .setIsEventListenerModeSync(true) .setKeyType(String.class) + .setDiskCacheAlias(generateRandomString(5)) .setKeySerializer(new StringSerializer()) .setValueSerializer(new StringSerializer()) .setValueType(String.class) @@ -882,7 +878,9 @@ public void testInvalidateWithDropDimensions() throws Exception { Settings settings = Settings.builder().build(); List dimensionNames = List.of("dim1", "dim2"); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehCacheDiskCachingTier = new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + ICache ehCacheDiskCachingTier = new EhcacheDiskCache.Builder().setDiskCacheAlias( + generateRandomString(5) + ) .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") .setKeySerializer(new StringSerializer()) .setValueSerializer(new StringSerializer()) @@ -934,8 +932,9 @@ public void testStatsTrackingDisabled() throws Exception { MockRemovalListener removalListener = new MockRemovalListener<>(); ToLongBiFunction, String> weigher = getWeigher(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") - .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + ICache ehcacheTest = new EhcacheDiskCache.Builder().setStoragePath( + env.nodePaths()[0].indicesPath.toString() + "/request_cache" + ) .setIsEventListenerModeSync(true) .setKeyType(String.class) .setValueType(String.class) @@ -970,19 +969,18 @@ public void testDiskCacheFilesAreClearedUpDuringCloseAndInitialization() throws // Create a dummy file to simulate a scenario where the data is already in the disk cache storage path // beforehand. 
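// The storage path is created up front so the dummy file can be placed inside it; the test later
// verifies that close() removes this entire directory along with the cache's own data.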
Files.createDirectory(Path.of(path)); + String diskCacheAlias = generateRandomString(5); Path dummyFilePath = Files.createFile(Path.of(path + "/testing.txt")); assertTrue(Files.exists(dummyFilePath)); - ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") - .setStoragePath(path) + ICache ehcacheTest = new EhcacheDiskCache.Builder().setStoragePath(path) .setIsEventListenerModeSync(true) .setKeyType(String.class) .setValueType(String.class) .setKeySerializer(new StringSerializer()) - .setDiskCacheAlias("test1") + .setDiskCacheAlias(diskCacheAlias) .setValueSerializer(new StringSerializer()) .setDimensionNames(List.of(dimensionName)) .setCacheType(CacheType.INDICES_REQUEST_CACHE) - .setThreadPoolAlias("") .setSettings(settings) .setExpireAfterAccess(TimeValue.MAX_VALUE) .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) @@ -1005,7 +1003,7 @@ public void testDiskCacheFilesAreClearedUpDuringCloseAndInitialization() throws assertTrue(Files.exists(Path.of(path))); boolean folderExists = Files.walk(Path.of(path)) .filter(Files::isDirectory) - .anyMatch(path1 -> path1.getFileName().toString().startsWith("test1")); + .anyMatch(path1 -> path1.getFileName().toString().startsWith(diskCacheAlias)); assertTrue(folderExists); ehcacheTest.close(); assertFalse(Files.exists(Path.of(path))); // Verify everything is cleared up now after close() @@ -1024,7 +1022,7 @@ public void testDiskCacheCloseCalledTwiceAndVerifyDiskDataIsCleanedUp() throws E .setKeyType(String.class) .setValueType(String.class) .setKeySerializer(new StringSerializer()) - .setDiskCacheAlias("test1") + .setDiskCacheAlias(generateRandomString(5)) .setValueSerializer(new StringSerializer()) .setDimensionNames(List.of(dimensionName)) .setCacheType(CacheType.INDICES_REQUEST_CACHE) @@ -1061,7 +1059,7 @@ public void testDiskCacheCloseAfterCleaningUpFilesManually() throws Exception { .setKeyType(String.class) .setValueType(String.class) .setKeySerializer(new StringSerializer()) - .setDiskCacheAlias("test1") + .setDiskCacheAlias(generateRandomString(5)) .setValueSerializer(new StringSerializer()) .setDimensionNames(List.of(dimensionName)) .setCacheType(CacheType.INDICES_REQUEST_CACHE) @@ -1193,15 +1191,6 @@ public void testEhcacheWithStorageSizeZero() throws Exception { } } - public void testEhcacheCloseWithDestroyCacheMethodThrowingException() throws Exception { - EhcacheDiskCache ehcacheDiskCache = new MockEhcahceDiskCache(createDummyBuilder(null)); - PersistentCacheManager cacheManager = ehcacheDiskCache.getCacheManager(); - doNothing().when(cacheManager).removeCache(anyString()); - doNothing().when(cacheManager).close(); - doThrow(new RuntimeException("test")).when(cacheManager).destroyCache(anyString()); - ehcacheDiskCache.close(); - } - public void testWithCacheConfigSizeSettings() throws Exception { // The cache should get its size from the config if present, and otherwise should get it from the setting. long maxSizeFromSetting = between(MINIMUM_MAX_SIZE_IN_BYTES + 1000, MINIMUM_MAX_SIZE_IN_BYTES + 2000); @@ -1209,9 +1198,11 @@ public void testWithCacheConfigSizeSettings() throws Exception { EhcacheDiskCache cache = setupMaxSizeTest(maxSizeFromSetting, maxSizeFromConfig, false); assertEquals(maxSizeFromSetting, cache.getMaximumWeight()); + cache.close(); cache = setupMaxSizeTest(maxSizeFromSetting, maxSizeFromConfig, true); assertEquals(maxSizeFromConfig, cache.getMaximumWeight()); + cache.close(); } // Modified from OpenSearchOnHeapCacheTests. Can't reuse, as we can't add a dependency on the server.test module. 
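The hunks above swap the hard-coded "test1" disk cache alias for generateRandomString(5), so that caches registered against what is now a shared per-cache-type manager (exercised by EhcacheDiskCacheManagerTests below) no longer collide on alias names. The helper's definition is not part of this diff; a minimal sketch of what such a helper might look like (an assumption for illustration, not the actual implementation):

    private static String generateRandomString(int length) {
        // Lower-case alphabetic alias; only uniqueness matters for these tests.
        String alphabet = "abcdefghijklmnopqrstuvwxyz";
        StringBuilder sb = new StringBuilder(length);
        java.util.Random random = new java.util.Random();
        for (int i = 0; i < length; i++) {
            sb.append(alphabet.charAt(random.nextInt(alphabet.length())));
        }
        return sb.toString();
    }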
@@ -1234,7 +1225,7 @@ private EhcacheDiskCache setupMaxSizeTest(long maxSizeFromSettin EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) .get(DISK_STORAGE_PATH_KEY) .getKey(), - env.nodePaths()[0].indicesPath.toString() + "/request_cache/" + 0 + env.nodePaths()[0].indicesPath.toString() + "/request_cache/" ) .build(); diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhcacheDiskCacheManagerTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhcacheDiskCacheManagerTests.java new file mode 100644 index 0000000000000..36252a0a2681d --- /dev/null +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhcacheDiskCacheManagerTests.java @@ -0,0 +1,186 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.store.disk; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; +import java.util.function.Supplier; + +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheEventListenerConfigurationBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.event.CacheEvent; +import org.ehcache.event.CacheEventListener; +import org.ehcache.event.EventType; +import org.ehcache.expiry.ExpiryPolicy; +import org.ehcache.impl.config.store.disk.OffHeapDiskStoreConfiguration; + +public class EhcacheDiskCacheManagerTests extends OpenSearchSingleNodeTestCase { + + private static final String THREAD_POOL_ALIAS = "poolAlias"; + + @SuppressWarnings("rawTypes") + public void testCreateAndCloseCacheConcurrently() throws Exception { + Settings settings = Settings.builder().build(); + String path = null; + try (NodeEnvironment env = newNodeEnvironment(settings)) { + path = env.nodePaths()[0].path.toString() + "/request_cache"; + EhcacheDiskCacheManager.getCacheManager(CacheType.INDICES_REQUEST_CACHE, path, settings, THREAD_POOL_ALIAS); + } + int randomThreads = randomIntBetween(5, 10); + Thread[] threads = new Thread[randomThreads]; + Phaser phaser = new Phaser(randomThreads + 1); + CountDownLatch countDownLatch = new CountDownLatch(randomThreads); + List diskCacheAliases = new ArrayList<>(); + for (int i = 0; i < randomThreads; i++) { + threads[i] = new Thread(() -> { + String diskCacheAlias = UUID.randomUUID().toString(); + diskCacheAliases.add(diskCacheAlias); + phaser.arriveAndAwaitAdvance(); + EhcacheDiskCacheManager.createCache(CacheType.INDICES_REQUEST_CACHE, diskCacheAlias, getCacheConfigurationBuilder()); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + assertEquals(randomThreads, EhcacheDiskCacheManager.getCacheManagerMap().get(CacheType.INDICES_REQUEST_CACHE).v2().get()); + + threads = new Thread[randomThreads]; + Phaser phaser2 = new Phaser(randomThreads + 1); + CountDownLatch countDownLatch2 = new CountDownLatch(randomThreads); + for (int i = 0; i < 
randomThreads; i++) { + String finalPath = path; + int finalI = i; + threads[i] = new Thread(() -> { + phaser2.arriveAndAwaitAdvance(); + EhcacheDiskCacheManager.closeCache(CacheType.INDICES_REQUEST_CACHE, diskCacheAliases.get(finalI), finalPath); + countDownLatch2.countDown(); + }); + threads[i].start(); + } + phaser2.arriveAndAwaitAdvance(); + countDownLatch2.await(); + + assertNull(EhcacheDiskCacheManager.getCacheManagerMap().get(CacheType.INDICES_REQUEST_CACHE)); + assertFalse(EhcacheDiskCacheManager.doesCacheManagerExist(CacheType.INDICES_REQUEST_CACHE)); + } + + public void testCreateCacheWithNullArguments() { + assertThrows( + IllegalArgumentException.class, + () -> EhcacheDiskCacheManager.createCache(CacheType.INDICES_REQUEST_CACHE, "test", null) + ); + } + + public void testCreateCacheWithInvalidArgument() throws IOException { + String cacheName = "test"; + String path = null; + CacheConfigurationBuilder cacheConfigurationBuilder = CacheConfigurationBuilder.newCacheConfigurationBuilder( + String.class, + String.class, + ResourcePoolsBuilder.newResourcePoolsBuilder().disk(1024 * 100, MemoryUnit.B) + ); + try (NodeEnvironment env = newNodeEnvironment(Settings.EMPTY)) { + path = env.nodePaths()[0].path.toString() + "/request_cache"; + EhcacheDiskCacheManager.getCacheManager(CacheType.INDICES_REQUEST_CACHE, path, Settings.EMPTY, THREAD_POOL_ALIAS); + } + EhcacheDiskCacheManager.createCache(CacheType.INDICES_REQUEST_CACHE, cacheName, cacheConfigurationBuilder); + // Try creating cache with the same alias, should fail + assertThrows( + IllegalArgumentException.class, + () -> EhcacheDiskCacheManager.createCache(CacheType.INDICES_REQUEST_CACHE, cacheName, cacheConfigurationBuilder) + ); + EhcacheDiskCacheManager.closeCache(CacheType.INDICES_REQUEST_CACHE, cacheName, path); + } + + public void testCreateCacheWithInvalidCacheSize() throws Exception { + String cacheName = "test"; + String path = null; + CacheConfigurationBuilder cacheConfigurationBuilder = CacheConfigurationBuilder.newCacheConfigurationBuilder( + String.class, + String.class, + ResourcePoolsBuilder.newResourcePoolsBuilder().disk(100, MemoryUnit.B) + ); + try (NodeEnvironment env = newNodeEnvironment(Settings.EMPTY)) { + path = env.nodePaths()[0].path.toString() + "/request_cache"; + EhcacheDiskCacheManager.getCacheManager(CacheType.INDICES_REQUEST_CACHE, path, Settings.EMPTY, THREAD_POOL_ALIAS); + } + assertThrows( + IllegalStateException.class, + () -> EhcacheDiskCacheManager.createCache(CacheType.INDICES_REQUEST_CACHE, cacheName, cacheConfigurationBuilder) + ); + EhcacheDiskCacheManager.getCacheManagerMap().remove(CacheType.INDICES_REQUEST_CACHE); // Clear up + } + + public void testCreateCacheWithCacheManagerDoesNotExist() { + String cacheName = "test"; + CacheConfigurationBuilder cacheConfigurationBuilder = CacheConfigurationBuilder.newCacheConfigurationBuilder( + String.class, + String.class, + ResourcePoolsBuilder.newResourcePoolsBuilder().disk(1024 * 100, MemoryUnit.B) + ); + assertThrows( + IllegalArgumentException.class, + () -> EhcacheDiskCacheManager.createCache(CacheType.INDICES_REQUEST_CACHE, cacheName, cacheConfigurationBuilder) + ); + } + + private CacheConfigurationBuilder getCacheConfigurationBuilder() { + CacheConfigurationBuilder cacheConfigurationBuilder = CacheConfigurationBuilder.newCacheConfigurationBuilder( + String.class, + String.class, + ResourcePoolsBuilder.newResourcePoolsBuilder().disk(1024 * 101, MemoryUnit.B) + ).withExpiry(new ExpiryPolicy<>() { + @Override + public Duration 
getExpiryForCreation(String key, String value) { + return null; + } + + @Override + public Duration getExpiryForAccess(String key, Supplier value) { + return null; + } + + @Override + public Duration getExpiryForUpdate(String key, Supplier oldValue, String newValue) { + return null; + } + }) + .withService( + CacheEventListenerConfigurationBuilder.newEventListenerConfiguration( + new MockEhcacheEventListener(), + EventType.EVICTED, + EventType.EXPIRED, + EventType.REMOVED, + EventType.UPDATED, + EventType.CREATED + ).unordered().synchronous() + ) + .withService(new OffHeapDiskStoreConfiguration(EhcacheDiskCacheManagerTests.THREAD_POOL_ALIAS, 1)); + return cacheConfigurationBuilder; + } + + class MockEhcacheEventListener implements CacheEventListener { + + @Override + public void onEvent(CacheEvent event) {} + } +} From 087e4735fbd4644957df29fc9cab074bcaafefca Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 9 Apr 2025 12:44:20 -0700 Subject: [PATCH 178/550] Rename search_only_replica settings to search_replica (#17857) * Rename search_only_replica settings to search_replica Signed-off-by: Marc Handalian * Fix missed test cases expecting search_only exception messageA Signed-off-by: Marc Handalian * Fix tests not using setting constant Signed-off-by: Marc Handalian --------- Signed-off-by: Marc Handalian --- ...SearchReplicaReplicationAndRecoveryIT.java | 2 +- .../replication/SearchReplicaRestoreIT.java | 2 +- .../indices/settings/SearchOnlyReplicaIT.java | 24 +++++++++---------- .../snapshots/RestoreSnapshotIT.java | 8 +++---- .../cluster/metadata/IndexMetadata.java | 2 +- .../cluster/routing/OperationRouting.java | 8 +++---- .../common/settings/ClusterSettings.java | 2 +- .../metadata/SearchOnlyReplicaTests.java | 2 +- .../routing/OperationRoutingTests.java | 2 +- 9 files changed, 26 insertions(+), 26 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java index a550f6cc6586c..3d09c0ef7ef25 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java @@ -103,7 +103,7 @@ public void testSegmentReplicationStatsResponseWithSearchReplica() throws Except Settings.builder() .put("number_of_shards", 1) .put("number_of_replicas", 0) - .put("number_of_search_only_replicas", 1) + .put("number_of_search_replicas", 1) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java index b544a8b602da6..cfed17cdf4967 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java @@ -173,7 +173,7 @@ private String getSnapshotExceptionMessage(ReplicationType snapshotReplicationTy + "To restore with [index.replication.type] as [" + restoreReplicationType + "], " - + "[index.number_of_search_only_replicas] must be set to [0]"; + + "[index.number_of_search_replicas] must be set to [0]"; } private int getNumberOfSearchReplicas(String index) { 
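With this rename, search replicas are requested through index.number_of_search_replicas and the strict routing toggle moves to cluster.routing.search_replica.strict. A minimal sketch of index settings built with the new key, following the same builder pattern the tests above use (illustrative only, assuming the usual Settings, IndexMetadata, and ReplicationType imports):

    Settings indexSettings = Settings.builder()
        .put("number_of_shards", 1)
        .put("number_of_replicas", 0)
        // resolves to "index.number_of_search_replicas" after this change
        .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
        // search replicas still require segment replication on the index
        .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
        .build();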
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java index eab3d229f576d..0e6d425de1a9f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java @@ -29,6 +29,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.cluster.routing.OperationRouting.STRICT_SEARCH_REPLICA_ROUTING_ENABLED; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.anyOf; @@ -44,8 +45,7 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); } - private final String expectedFailureMessage = - "To set index.number_of_search_only_replicas, index.replication.type must be set to SEGMENT"; + private final String expectedFailureMessage = "To set index.number_of_search_replicas, index.replication.type must be set to SEGMENT"; @Override public Settings indexSettings() { @@ -246,8 +246,8 @@ public void testSearchReplicaRoutingPreferenceWhenSearchReplicaUnassigned() { ensureYellow(TEST_INDEX); client().prepareIndex(TEST_INDEX).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); - // By default cluster.routing.search_only.strict is set as true - // When cluster.routing.search_only.strict is set as true, and no assigned search replica is available, + // By default cluster.routing.search_replica.strict is set as true + // When cluster.routing.search_replica.strict is set as true, and no assigned search replica is available, // search request will fail since it will route only to search replica but it's not available Throwable throwable = assertThrows( SearchPhaseExecutionException.class, @@ -256,14 +256,14 @@ public void testSearchReplicaRoutingPreferenceWhenSearchReplicaUnassigned() { assertEquals("all shards failed", throwable.getMessage()); - // Set cluster.routing.search_only.strict as false + // Set cluster.routing.search_replica.strict as false client().admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put("cluster.routing.search_only.strict", false)) + .setTransientSettings(Settings.builder().put(STRICT_SEARCH_REPLICA_ROUTING_ENABLED.getKey(), false)) .get(); - // When cluster.routing.search_only.strict is set as false, and no assigned search replica is available; + // When cluster.routing.search_replica.strict is set as false, and no assigned search replica is available; // search request will fall back to querying writers SearchResponse response = client().prepareSearch(TEST_INDEX).setPreference(null).setQuery(QueryBuilders.matchAllQuery()).get(); @@ -282,8 +282,8 @@ public void testSearchReplicaRoutingPreferenceWhenSearchReplicaAssigned() { internalCluster().startSearchOnlyNode(); ensureGreen(TEST_INDEX); - // By default cluster.routing.search_only.strict is set as true - // When cluster.routing.search_only.strict is set as true, and assigned search replica is available; + // By default 
cluster.routing.search_replica.strict is set as true + // When cluster.routing.search_replica.strict is set as true, and assigned search replica is available; // search request will succeed SearchResponse response = client().prepareSearch(TEST_INDEX).setPreference(null).setQuery(QueryBuilders.matchAllQuery()).get(); @@ -291,14 +291,14 @@ public void testSearchReplicaRoutingPreferenceWhenSearchReplicaAssigned() { IndexShardRoutingTable indexShardRoutingTable = getIndexShardRoutingTable(); assertEquals(nodeId, indexShardRoutingTable.searchOnlyReplicas().get(0).currentNodeId()); - // Set cluster.routing.search_only.strict as false + // Set cluster.routing.search_replica.strict as false client().admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put("cluster.routing.search_only.strict", false)) + .setTransientSettings(Settings.builder().put(STRICT_SEARCH_REPLICA_ROUTING_ENABLED.getKey(), false)) .get(); - // When cluster.routing.search_only.strict is set as false, and assigned search replica is available; + // When cluster.routing.search_replica.strict is set as false, and assigned search replica is available; // search request can land on either writer or reader response = client().prepareSearch(TEST_INDEX).setPreference(null).setQuery(QueryBuilders.matchAllQuery()).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 5431fb8b05d61..7e1e85e6cb6db 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -1200,7 +1200,7 @@ public void testInvalidRestoreRequest_MixRemovableAndUnmodifiableOnRestoreIgnore setupSnapshotRestore(); // try index restore with mix of removable and UnmodifiableOnRestore settings ignored - // index.version.created is UnmodifiableOnRestore, index.number_of_search_only_replicas is removable + // index.version.created is UnmodifiableOnRestore, index.number_of_search_replicas is removable SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, () -> client().admin() @@ -1220,7 +1220,7 @@ public void testInvalidRestoreRequest_MixRemovableAndUserUnRemovableSettingsIgno setupSnapshotRestore(); // try index restore with mix of removable and USER_UNREMOVABLE_SETTINGS settings ignored - // index.number_of_replicas is USER_UNREMOVABLE_SETTINGS, index.number_of_search_only_replicas is removable + // index.number_of_replicas is USER_UNREMOVABLE_SETTINGS, index.number_of_search_replicas is removable SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, () -> client().admin() @@ -1367,7 +1367,7 @@ public void testInvalidRestoreRequest_MixModifiableAndUnmodifiableOnRestoreModif setupSnapshotRestore(); // try index restore with mix of modifiable and UnmodifiableOnRestore settings modified - // index.version.created is UnmodifiableOnRestore, index.number_of_search_only_replicas is modifiable + // index.version.created is UnmodifiableOnRestore, index.number_of_search_replicas is modifiable Settings mixedSettingsUnmodifiableOnRestore = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.V_EMPTY) .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) @@ -1393,7 +1393,7 @@ public void testInvalidRestoreRequest_MixModifiableAndUserUnmodifiableSettingsMo setupSnapshotRestore(); // try index restore with mix of 
modifiable and USER_UNMODIFIABLE_SETTINGS settings modified - // index.remote_store.enabled is USER_UNMODIFIABLE_SETTINGS, index.number_of_search_only_replicas is modifiable + // index.remote_store.enabled is USER_UNMODIFIABLE_SETTINGS, index.number_of_search_replicas is modifiable Settings mixedSettingsUserUnmodifiableSettings = Settings.builder() .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 7cdfba563f25f..04b10a43ffa10 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -282,7 +282,7 @@ static Setting buildNumberOfShardsSetting() { * with their primary. Search replicas require the use of Segment Replication on the index and poll their {@link SegmentReplicationSource} for * updates. //TODO: Once physical isolation is introduced, reference the setting here. */ - public static final String SETTING_NUMBER_OF_SEARCH_REPLICAS = "index.number_of_search_only_replicas"; + public static final String SETTING_NUMBER_OF_SEARCH_REPLICAS = "index.number_of_search_replicas"; public static final Setting INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING = Setting.intSetting( SETTING_NUMBER_OF_SEARCH_REPLICAS, 0, diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index bcf03ec0e75b7..21087094d2be0 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -118,8 +118,8 @@ public class OperationRouting { Preference.PREFER_NODES ); - public static final Setting STRICT_SEARCH_ONLY_ROUTING_ENABLED = Setting.boolSetting( - "cluster.routing.search_only.strict", + public static final Setting STRICT_SEARCH_REPLICA_ROUTING_ENABLED = Setting.boolSetting( + "cluster.routing.search_replica.strict", true, Setting.Property.Dynamic, Setting.Property.NodeScope @@ -148,14 +148,14 @@ public OperationRouting(Settings settings, ClusterSettings clusterSettings) { this.isFailOpenEnabled = WEIGHTED_ROUTING_FAILOPEN_ENABLED.get(settings); this.isStrictWeightedShardRouting = STRICT_WEIGHTED_SHARD_ROUTING_ENABLED.get(settings); this.ignoreWeightedRouting = IGNORE_WEIGHTED_SHARD_ROUTING.get(settings); - this.isStrictSearchOnlyShardRouting = STRICT_SEARCH_ONLY_ROUTING_ENABLED.get(settings); + this.isStrictSearchOnlyShardRouting = STRICT_SEARCH_REPLICA_ROUTING_ENABLED.get(settings); clusterSettings.addSettingsUpdateConsumer(USE_ADAPTIVE_REPLICA_SELECTION_SETTING, this::setUseAdaptiveReplicaSelection); clusterSettings.addSettingsUpdateConsumer(IGNORE_AWARENESS_ATTRIBUTES_SETTING, this::setIgnoreAwarenessAttributes); clusterSettings.addSettingsUpdateConsumer(WEIGHTED_ROUTING_DEFAULT_WEIGHT, this::setWeightedRoutingDefaultWeight); clusterSettings.addSettingsUpdateConsumer(WEIGHTED_ROUTING_FAILOPEN_ENABLED, this::setFailOpenEnabled); clusterSettings.addSettingsUpdateConsumer(STRICT_WEIGHTED_SHARD_ROUTING_ENABLED, this::setStrictWeightedShardRouting); clusterSettings.addSettingsUpdateConsumer(IGNORE_WEIGHTED_SHARD_ROUTING, this::setIgnoreWeightedRouting); - clusterSettings.addSettingsUpdateConsumer(STRICT_SEARCH_ONLY_ROUTING_ENABLED, this::setStrictSearchOnlyShardRouting); + 
clusterSettings.addSettingsUpdateConsumer(STRICT_SEARCH_REPLICA_ROUTING_ENABLED, this::setStrictSearchOnlyShardRouting); this.isReaderWriterSplitEnabled = FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.get(settings); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 9d8b458d70966..5b998165c58a7 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -613,7 +613,7 @@ public void apply(Settings value, Settings current, Settings previous) { OperationRouting.WEIGHTED_ROUTING_FAILOPEN_ENABLED, OperationRouting.STRICT_WEIGHTED_SHARD_ROUTING_ENABLED, OperationRouting.IGNORE_WEIGHTED_SHARD_ROUTING, - OperationRouting.STRICT_SEARCH_ONLY_ROUTING_ENABLED, + OperationRouting.STRICT_SEARCH_REPLICA_ROUTING_ENABLED, IndexGraveyard.SETTING_MAX_TOMBSTONES, PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java b/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java index 81055e01d915b..da63222a2786e 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java @@ -105,7 +105,7 @@ public void testSearchReplicasValidationWithDocumentReplication() { ) ); assertEquals( - "To set index.number_of_search_only_replicas, index.remote_store.enabled must be set to true", + "To set index.number_of_search_replicas, index.remote_store.enabled must be set to true", exception.getCause().getMessage() ); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index e7d43de5d1d51..f0fc3dd57749d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -1256,7 +1256,7 @@ public void testSearchReplicaRoutingWhenSearchOnlyStrictSettingIsFalse() throws assertEquals("We should have all 6 shards returned", shardIterator.size(), 6); for (ShardRouting shardRouting : shardIterator) { assertTrue( - "Any shard can exist with when cluster.routing.search_only.strict is set as false", + "Any shard can exist with when cluster.routing.search_replica.strict is set as false", shardRouting.isSearchOnly() || shardRouting.primary() || shardRouting.isSearchOnly() == false ); } From 44c5670794e63626e10a05c0d47fd7774d7ee035 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Wed, 9 Apr 2025 13:07:02 -0700 Subject: [PATCH 179/550] Fix flaky tests in `MetadataIndexStateServiceIT` (#17866) * Fix flaky tests MetadataIndexStateServiceIT Signed-off-by: Prudhvi Godithi * Update the test Signed-off-by: Prudhvi Godithi --------- Signed-off-by: Prudhvi Godithi --- .../metadata/MetadataIndexStateServiceIT.java | 52 +++++++------------ 1 file changed, 20 insertions(+), 32 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java index 026ee43454c07..b914dbff03a42 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java @@ -8,9 +8,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.action.admin.cluster.state.ClusterStateResponse; -import org.opensearch.action.admin.indices.open.OpenIndexResponse; -import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; @@ -54,19 +51,13 @@ public void testIndexCloseAndOpen() throws Exception { } assertAcked(client().admin().indices().prepareClose(TEST_INDEX).get()); + assertEquals( + IndexMetadata.State.CLOSE, + client().admin().cluster().prepareState().get().getState().metadata().index(TEST_INDEX).getState() + ); - ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); - IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(TEST_INDEX); - assertEquals(IndexMetadata.State.CLOSE, indexMetadata.getState()); - - OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen(TEST_INDEX).get(); - - assertTrue("Open operation should be acknowledged", openIndexResponse.isAcknowledged()); - assertTrue("Open operation shards should be acknowledged", openIndexResponse.isShardsAcknowledged()); - - clusterStateResponse = client().admin().cluster().prepareState().get(); - indexMetadata = clusterStateResponse.getState().metadata().index(TEST_INDEX); - assertEquals(IndexMetadata.State.OPEN, indexMetadata.getState()); + assertAcked(client().admin().indices().prepareOpen(TEST_INDEX).get()); + ensureGreen(TEST_INDEX); assertBusy(() -> { SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).get(); @@ -99,26 +90,23 @@ public void testIndexCloseAndOpenWithSearchOnlyMode() throws Exception { assertAcked(client().admin().indices().prepareScaleSearchOnly(TEST_INDEX, true).get()); ensureGreen(TEST_INDEX); - GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings(TEST_INDEX).get(); - assertTrue(settingsResponse.getSetting(TEST_INDEX, IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey()).equals("true")); + assertTrue( + client().admin() + .indices() + .prepareGetSettings(TEST_INDEX) + .get() + .getSetting(TEST_INDEX, IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey()) + .equals("true") + ); assertAcked(client().admin().indices().prepareClose(TEST_INDEX).get()); + assertEquals( + IndexMetadata.State.CLOSE, + client().admin().cluster().prepareState().get().getState().metadata().index(TEST_INDEX).getState() + ); - ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); - IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(TEST_INDEX); - assertEquals(IndexMetadata.State.CLOSE, indexMetadata.getState()); - - OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen(TEST_INDEX).get(); - - assertTrue("Open operation should be acknowledged", openIndexResponse.isAcknowledged()); - assertTrue("Open operation shards should be acknowledged", openIndexResponse.isShardsAcknowledged()); - - clusterStateResponse = client().admin().cluster().prepareState().get(); - indexMetadata = clusterStateResponse.getState().metadata().index(TEST_INDEX); - assertEquals(IndexMetadata.State.OPEN, indexMetadata.getState()); - - 
settingsResponse = client().admin().indices().prepareGetSettings(TEST_INDEX).get(); - assertTrue(settingsResponse.getSetting(TEST_INDEX, IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey()).equals("true")); + assertAcked(client().admin().indices().prepareOpen(TEST_INDEX).get()); + ensureGreen(TEST_INDEX); assertBusy(() -> { SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).get(); From 51a217aae41440159904732f87c9ba422d94ae96 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Wed, 9 Apr 2025 14:09:51 -0700 Subject: [PATCH 180/550] Adjust the `getAutoExpandReplicaChanges` logic to honor `search_only` mode. (#17865) * Adjust auto expand replica logic Signed-off-by: Prudhvi Godithi * Add code comments Signed-off-by: Prudhvi Godithi * Add IT test Signed-off-by: Prudhvi Godithi * Fix spotless Signed-off-by: Prudhvi Godithi * Fix spotless Signed-off-by: Prudhvi Godithi --------- Signed-off-by: Prudhvi Godithi --- .../settings/UpdateNumberOfReplicasIT.java | 45 +++++++++ .../cluster/metadata/AutoExpandReplicas.java | 4 + .../metadata/AutoExpandReplicasTests.java | 94 ++++++++++++++++++- 3 files changed, 142 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java index c73168ec6ad17..6c40417bda430 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java @@ -739,4 +739,49 @@ public void testAwarenessReplicaBalanceWithUseZoneForDefaultReplicaCount() { } } + public void testSkipSearchOnlyIndexForAutoExpandReplicasIT() throws Exception { + final String TEST_INDEX = "test"; + + // Create index with auto expand replicas + assertAcked(prepareCreate(TEST_INDEX, 2, Settings.builder().put("auto_expand_replicas", "0-all"))); + + int initialReplicas = client().admin().cluster().prepareState().get().getState().metadata().index(TEST_INDEX).getNumberOfReplicas(); + assertEquals(1, initialReplicas); + + // This adds 2 data nodes + allowNodes(TEST_INDEX, 3); + ensureGreen(TEST_INDEX); + + // Verify replicas were expanded + int afterExpansionReplicas = client().admin() + .cluster() + .prepareState() + .get() + .getState() + .metadata() + .index(TEST_INDEX) + .getNumberOfReplicas(); + + assertEquals(2, afterExpansionReplicas); + + // Add the search_only block + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(TEST_INDEX) + .setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), true)) + .execute() + .actionGet() + ); + // This adds 3 data nodes + allowNodes(TEST_INDEX, 4); + ensureGreen(TEST_INDEX); + + // Verify same replicas + int finalReplicas = client().admin().cluster().prepareState().get().getState().metadata().index(TEST_INDEX).getNumberOfReplicas(); + + // Assert that replica count didn't change after enabling search-only mode + assertEquals("Replica count should not change for search_only index", afterExpansionReplicas, finalReplicas); + } + } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java b/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java index d9a3d7bf8eb3f..bfc474bc75a53 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java +++ 
b/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java @@ -182,6 +182,10 @@ public static Map> getAutoExpandReplicaChanges(Metadata me for (final IndexMetadata indexMetadata : metadata) { if (indexMetadata.getState() == IndexMetadata.State.OPEN || isIndexVerifiedBeforeClosed(indexMetadata)) { + // Skip the replica auto-expansion for indices in search_only mode with the SEARCH_ONLY block + if (indexMetadata.getSettings().getAsBoolean(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), false)) { + continue; + } AutoExpandReplicas autoExpandReplicas = SETTING.get(indexMetadata.getSettings()); autoExpandReplicas.getDesiredNumberOfReplicas(indexMetadata, allocation).ifPresent(numberOfReplicas -> { if (numberOfReplicas != indexMetadata.getNumberOfReplicas()) { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java index 3c1bcf8449458..ca59f1af13279 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java @@ -43,6 +43,9 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.settings.Settings; import org.opensearch.indices.cluster.ClusterStateChanges; import org.opensearch.test.OpenSearchTestCase; @@ -54,11 +57,13 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -136,7 +141,7 @@ protected DiscoveryNode createNode(DiscoveryNodeRole... mustHaveRoles) { * Instead, one of the replicas on the live nodes first gets promoted to primary, and the auto-expansion (removing replicas) only * triggers in a follow-up step. 
*/ - public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedException { + public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() { final ThreadPool threadPool = new TestThreadPool(getClass().getName()); final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); @@ -297,4 +302,91 @@ public void testOnlyAutoExpandAllocationFilteringAfterAllNodesUpgraded() { terminate(threadPool); } } + + public void testSkipSearchOnlyIndexForAutoExpandReplicas() { + final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); + + try { + DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE); + ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, localNode); + + state = cluster.createIndex( + state, + new CreateIndexRequest( + "search-only-index", + Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_AUTO_EXPAND_REPLICAS, "0-all") + .put(IndexMetadata.INDEX_BLOCKS_SEARCH_ONLY_SETTING.getKey(), true) + .build() + ).waitForActiveShards(ActiveShardCount.NONE) + ); + + state = cluster.createIndex( + state, + new CreateIndexRequest( + "regular-index", + Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_AUTO_EXPAND_REPLICAS, "0-all") + .build() + ).waitForActiveShards(ActiveShardCount.NONE) + ); + + List additionalNodes = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + additionalNodes.add(createNode(DiscoveryNodeRole.DATA_ROLE)); + } + state = cluster.addNodes(state, additionalNodes); + + while (state.routingTable().shardsWithState(ShardRoutingState.INITIALIZING).isEmpty() == false + || state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).isEmpty() == false) { + state = cluster.applyStartedShards(state, state.routingTable().shardsWithState(ShardRoutingState.INITIALIZING)); + state = cluster.reroute(state, new ClusterRerouteRequest()); + } + + assertEquals(3, state.metadata().index("regular-index").getNumberOfReplicas()); + assertEquals(0, state.metadata().index("search-only-index").getNumberOfReplicas()); + + AllocationDeciders allocationDeciders = new AllocationDeciders(Collections.emptyList()) { + @Override + public Decision shouldAutoExpandToNode(IndexMetadata indexMetadata, DiscoveryNode node, RoutingAllocation allocation) { + return Decision.YES; + } + }; + RoutingAllocation allocation = new RoutingAllocation( + allocationDeciders, + state.getRoutingNodes(), + state, + null, + null, + System.nanoTime() + ); + + // To force the auto expand scenario as the expand might have already triggered upon adding a new node. 
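+ // Dropping the regular index back to 0 replicas guarantees getAutoExpandReplicaChanges has a delta
+ // to report for it, while the search-only index is expected to be skipped entirely.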
+ Metadata.Builder metadataBuilder = Metadata.builder(state.metadata()); + IndexMetadata originalMeta = state.metadata().index("regular-index"); + IndexMetadata.Builder indexMetaBuilder = IndexMetadata.builder(originalMeta); + indexMetaBuilder.numberOfReplicas(0); + metadataBuilder.put(indexMetaBuilder); + + Map> changes = AutoExpandReplicas.getAutoExpandReplicaChanges(metadataBuilder.build(), allocation); + + assertFalse( + "Search-only index should not be auto-expanded", + changes.values().stream().anyMatch(indices -> indices.contains("search-only-index")) + ); + + assertTrue( + "Regular index should be auto-expanded", + changes.values().stream().anyMatch(indices -> indices.contains("regular-index")) + ); + + } finally { + terminate(threadPool); + } + } } From cfe7bf6ec7031beaa39f43056d2e14ffff3484d6 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Wed, 9 Apr 2025 17:11:59 -0400 Subject: [PATCH 181/550] Fix java.lang.NoClassDefFoundError: org/opensearch/secure_sm/SecureSM (#17867) Signed-off-by: Craig Perkins --- .../main/java/org/opensearch/bootstrap/BootstrapForTesting.java | 1 - 1 file changed, 1 deletion(-) diff --git a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java index e67b5539bb690..0cbffc7c6949b 100644 --- a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java @@ -171,7 +171,6 @@ public class BootstrapForTesting { // intellij and eclipse don't package our internal libs, so we need to set the codebases for them manually addClassCodebase(codebases, "plugin-classloader", "org.opensearch.plugins.ExtendedPluginsClassLoader"); addClassCodebase(codebases, "opensearch-nio", "org.opensearch.nio.ChannelFactory"); - addClassCodebase(codebases, "opensearch-secure-sm", "org.opensearch.secure_sm.SecureSM"); addClassCodebase(codebases, "opensearch-rest-client", "org.opensearch.client.RestClient"); } final Policy testFramework = Security.readPolicy(Bootstrap.class.getResource("test-framework.policy"), codebases); From c4b1066ea6bd11d26ccfa2c8e077acfd6d46b06a Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 9 Apr 2025 17:12:23 -0400 Subject: [PATCH 182/550] Fix :libs:agent-sm:agent publication (#17869) Signed-off-by: Andriy Redko --- libs/agent-sm/agent/build.gradle | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libs/agent-sm/agent/build.gradle b/libs/agent-sm/agent/build.gradle index 7383af76fd3bc..b06a77b2fcf5e 100644 --- a/libs/agent-sm/agent/build.gradle +++ b/libs/agent-sm/agent/build.gradle @@ -63,6 +63,10 @@ thirdPartyAudit { ) } +tasks.named('generateMetadataFileForNebulaPublication') { + dependsOn prepareAgent +} + tasks.named('validateNebulaPom') { dependsOn prepareAgent } From b7dca4aced6719f4b5b909a81388d62292deb934 Mon Sep 17 00:00:00 2001 From: Karen X Date: Wed, 9 Apr 2025 18:14:06 -0400 Subject: [PATCH 183/550] [GRPC] SearchService and Search GRPC endpoint v1 (#17830) Signed-off-by: Karen Xu Signed-off-by: Andrew Ross Co-authored-by: Andrew Ross --- CHANGELOG.md | 1 + plugins/transport-grpc/build.gradle | 2 +- .../licenses/protobufs-0.1.0.jar.sha1 | 1 - .../licenses/protobufs-0.2.0.jar.sha1 | 1 + .../plugin/transport/grpc/GrpcPlugin.java | 3 +- .../listeners/BulkRequestActionListener.java | 2 +- .../SearchRequestActionListener.java | 56 ++ .../common/FetchSourceContextProtoUtils.java | 43 +- .../request/common/ObjectMapProtoUtils.java | 2 +- 
.../request/common/ScriptProtoUtils.java | 27 +- .../bulk/ActiveShardCountProtoUtils.java | 14 +- .../search/CollapseBuilderProtoUtils.java | 51 ++ .../search/FieldAndFormatProtoUtils.java | 39 ++ .../search/HighlightBuilderProtoUtils.java | 45 ++ .../search/IndicesOptionsProtoUtils.java | 134 ++++ .../search/InnerHitsBuilderProtoUtils.java | 116 ++++ .../request/search/OperatorProtoUtils.java | 39 ++ .../search/PointInTimeBuilderProtoUtils.java | 44 ++ .../search/ProtoActionsProtoUtils.java | 50 ++ .../search/RescorerBuilderProtoUtils.java | 46 ++ .../search/SearchAfterBuilderProtoUtils.java | 69 ++ .../search/SearchRequestProtoUtils.java | 389 +++++++++++ .../search/SearchSourceBuilderProtoUtils.java | 237 +++++++ .../request/search/SearchTypeProtoUtils.java | 48 ++ .../search/SliceBuilderProtoUtils.java | 40 ++ .../search/StoredFieldsContextProtoUtils.java | 55 ++ .../proto/request/search/package-info.java | 15 + .../query/AbstractQueryBuilderProtoUtils.java | 51 ++ .../query/MatchAllQueryBuilderProtoUtils.java | 45 ++ .../MatchNoneQueryBuilderProtoUtils.java | 47 ++ .../query/TermQueryBuilderProtoUtils.java | 116 ++++ .../request/search/query/package-info.java | 15 + .../sort/FieldSortBuilderProtoUtils.java | 52 ++ .../search/sort/SortBuilderProtoUtils.java | 113 ++++ .../search/sort/SortOrderProtoUtils.java | 89 +++ .../request/search/sort/package-info.java | 15 + .../suggest/SuggestBuilderProtoUtils.java | 37 ++ .../TermSuggestionBuilderProtoUtils.java | 44 ++ .../request/search/suggest/package-info.java | 15 + .../response/common/FieldValueProtoUtils.java | 71 ++ .../bulk/BulkItemResponseProtoUtils.java | 2 +- .../document/common/ShardInfoProtoUtils.java | 2 +- .../OpenSearchExceptionProtoUtils.java | 6 +- .../opensearchexception/package-info.java | 15 + ...ardOperationFailedExceptionProtoUtils.java | 118 ++++ ...ionResponseShardInfoFailureProtoUtils.java | 48 ++ ...ardOperationFailedExceptionProtoUtils.java | 52 ++ .../ShardSearchFailureProtoUtils.java | 44 ++ .../SnapshotShardFailureProtoUtils.java | 42 ++ .../package-info.java | 15 + .../search/HighlightFieldProtoUtils.java | 41 ++ .../search/ProtoActionsProtoUtils.java | 54 ++ .../response/search/SearchHitProtoUtils.java | 219 +++++++ .../response/search/SearchHitsProtoUtils.java | 76 +++ .../search/SearchResponseProtoUtils.java | 186 ++++++ .../SearchResponseSectionsProtoUtils.java | 66 ++ .../search/SearchSortValuesProtoUtils.java | 40 ++ .../search/ShardStatisticsProtoUtils.java | 66 ++ .../proto/response/search/package-info.java | 15 + .../grpc/services/SearchServiceImpl.java | 62 ++ .../SearchRequestActionListenerTests.java | 78 +++ .../FetchSourceContextProtoUtilsTests.java | 178 +++++ .../request/common/ScriptProtoUtilsTests.java | 5 +- .../CollapseBuilderProtoUtilsTests.java | 106 +++ .../search/IndicesOptionsProtoUtilsTests.java | 292 +++++++++ .../InnerHitsBuilderProtoUtilsTests.java | 242 +++++++ .../search/OperatorProtoUtilsTests.java | 49 ++ .../PointInTimeBuilderProtoUtilsTests.java | 80 +++ .../search/ScriptFieldProtoUtilsTests.java | 213 ++++++ .../SearchAfterBuilderProtoUtilsTests.java | 162 +++++ .../search/SearchRequestProtoUtilsTests.java | 424 ++++++++++++ .../SearchSourceBuilderProtoUtilsTests.java | 607 ++++++++++++++++++ .../search/SortBuilderProtoUtilsTests.java | 28 + .../StoredFieldsContextProtoUtilsTests.java | 129 ++++ .../MatchAllQueryBuilderProtoUtilsTests.java | 73 +++ .../MatchNoneQueryBuilderProtoUtilsTests.java | 73 +++ .../TermQueryBuilderProtoUtilsTests.java | 306 +++++++++ 
.../sort/FieldSortBuilderProtoUtilsTests.java | 112 ++++ .../search/sort/SortOrderProtoUtilsTests.java | 89 +++ .../SuggestBuilderProtoUtilsTests.java | 27 + .../TermSuggestionBuilderProtoUtilsTests.java | 51 ++ .../common/FieldValueProtoUtilsTests.java | 192 ++++++ .../OpenSearchExceptionProtoUtilsTests.java | 3 +- ...erationFailedExceptionProtoUtilsTests.java | 125 ++++ ...erationFailedExceptionProtoUtilsTests.java | 122 ++++ .../search/HighlightFieldProtoUtilsTests.java | 61 ++ ...earchHitNestedIdentityProtoUtilsTests.java | 96 +++ .../search/SearchHitProtoUtilsTests.java | 253 ++++++++ .../search/SearchHitsProtoUtilsTests.java | 138 ++++ .../search/SearchResponseProtoUtilsTests.java | 282 ++++++++ .../grpc/services/SearchServiceImplTests.java | 73 +++ .../action/search/SearchResponse.java | 4 + .../org/opensearch/index/query/Operator.java | 2 +- .../java/org/opensearch/search/SearchHit.java | 28 + 94 files changed, 8124 insertions(+), 27 deletions(-) delete mode 100644 plugins/transport-grpc/licenses/protobufs-0.1.0.jar.sha1 create mode 100644 plugins/transport-grpc/licenses/protobufs-0.2.0.jar.sha1 create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/SearchRequestActionListener.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/CollapseBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/FieldAndFormatProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/HighlightBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/IndicesOptionsProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/InnerHitsBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/OperatorProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/PointInTimeBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/ProtoActionsProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/RescorerBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchAfterBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchRequestProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchSourceBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchTypeProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SliceBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/StoredFieldsContextProtoUtils.java create mode 100644 
plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchAllQueryBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchNoneQueryBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermQueryBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/SuggestBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/TermSuggestionBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtils.java rename plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/{common => exceptions/opensearchexception}/OpenSearchExceptionProtoUtils.java (97%) create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/opensearchexception/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/DefaultShardOperationFailedExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ReplicationResponseShardInfoFailureProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardOperationFailedExceptionProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardSearchFailureProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/SnapshotShardFailureProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/package-info.java create mode 100644 
plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/HighlightFieldProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ProtoActionsProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitsProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseSectionsProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchSortValuesProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ShardStatisticsProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/package-info.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/SearchServiceImpl.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/listeners/SearchRequestActionListenerTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/CollapseBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/IndicesOptionsProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/InnerHitsBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/OperatorProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/PointInTimeBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/ScriptFieldProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchAfterBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchRequestProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchSourceBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SortBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/StoredFieldsContextProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchAllQueryBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchNoneQueryBuilderProtoUtilsTests.java create mode 100644 
plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermQueryBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/SuggestBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/TermSuggestionBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtilsTests.java rename plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/{common => exceptions}/OpenSearchExceptionProtoUtilsTests.java (99%) create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/DefaultShardOperationFailedExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardOperationFailedExceptionProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/HighlightFieldProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitNestedIdentityProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitsProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/SearchServiceImplTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 455184632ea35..c764c495051b6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Star Tree] [Search] Resolving numeric range aggregation with metric aggregation using star-tree ([#17273](https://github.com/opensearch-project/OpenSearch/pull/17273)) - Added Search Only strict routing setting ([#17803](https://github.com/opensearch-project/OpenSearch/pull/17803)) - Disable the index API for ingestion engine ([#17768](https://github.com/opensearch-project/OpenSearch/pull/17768)) +- Add SearchService and Search GRPC endpoint ([#17830](https://github.com/opensearch-project/OpenSearch/pull/17830)) - Add update and delete support in pull-based ingestion ([#17822](https://github.com/opensearch-project/OpenSearch/pull/17822)) ### Changed diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle index 2db49c237b75f..3beed0ddc1bb0 100644 --- a/plugins/transport-grpc/build.gradle +++ b/plugins/transport-grpc/build.gradle @@ -29,7 +29,7 @@ dependencies { implementation "io.grpc:grpc-stub:${versions.grpc}" implementation 
"io.grpc:grpc-util:${versions.grpc}" implementation "io.perfmark:perfmark-api:0.26.0" - implementation "org.opensearch:protobufs:0.1.0" + implementation "org.opensearch:protobufs:0.2.0" } tasks.named("dependencyLicenses").configure { diff --git a/plugins/transport-grpc/licenses/protobufs-0.1.0.jar.sha1 b/plugins/transport-grpc/licenses/protobufs-0.1.0.jar.sha1 deleted file mode 100644 index 8eec0266319df..0000000000000 --- a/plugins/transport-grpc/licenses/protobufs-0.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48ba4377d529cf71ec7d8716e155b928118325d5 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/protobufs-0.2.0.jar.sha1 b/plugins/transport-grpc/licenses/protobufs-0.2.0.jar.sha1 new file mode 100644 index 0000000000000..3fe6d0cdbabd4 --- /dev/null +++ b/plugins/transport-grpc/licenses/protobufs-0.2.0.jar.sha1 @@ -0,0 +1 @@ +a29095657b4a0f9b59659d71e7e540e9b07fd044 \ No newline at end of file diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java index 257b252ca23d1..26e9721da4f44 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java @@ -19,6 +19,7 @@ import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.plugin.transport.grpc.services.DocumentServiceImpl; +import org.opensearch.plugin.transport.grpc.services.SearchServiceImpl; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoriesService; @@ -80,7 +81,7 @@ public Map> getAuxTransports( if (client == null) { throw new RuntimeException("client cannot be null"); } - List grpcServices = registerGRPCServices(new DocumentServiceImpl(client)); + List grpcServices = registerGRPCServices(new DocumentServiceImpl(client), new SearchServiceImpl(client)); return Collections.singletonMap( GRPC_TRANSPORT_SETTING_KEY, () -> new Netty4GrpcServerTransport(settings, grpcServices, networkService) diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListener.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListener.java index 589e15d2db489..fa7bbaf94c574 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListener.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/BulkRequestActionListener.java @@ -23,7 +23,7 @@ */ public class BulkRequestActionListener implements ActionListener { private static final Logger logger = LogManager.getLogger(BulkRequestActionListener.class); - private StreamObserver responseObserver; + private final StreamObserver responseObserver; /** * Creates a new BulkRequestActionListener. 
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/SearchRequestActionListener.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/SearchRequestActionListener.java new file mode 100644 index 0000000000000..bcdfaa0833a99 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/listeners/SearchRequestActionListener.java @@ -0,0 +1,56 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.plugin.transport.grpc.listeners; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.transport.grpc.proto.response.search.SearchResponseProtoUtils; + +import java.io.IOException; + +import io.grpc.stub.StreamObserver; + +/** + * Listener for search request execution completion, handling successful and failure scenarios. + */ +public class SearchRequestActionListener implements ActionListener { + private static final Logger logger = LogManager.getLogger(SearchRequestActionListener.class); + + private final StreamObserver responseObserver; + + /** + * Constructs a new SearchRequestActionListener. + * + * @param responseObserver the gRPC stream observer to send the search response to + */ + public SearchRequestActionListener(StreamObserver responseObserver) { + super(); + this.responseObserver = responseObserver; + } + + @Override + public void onResponse(SearchResponse response) { + // Search execution succeeded. Convert the opensearch internal response to protobuf + try { + org.opensearch.protobufs.SearchResponse protoResponse = SearchResponseProtoUtils.toProto(response); + responseObserver.onNext(protoResponse); + responseObserver.onCompleted(); + } catch (RuntimeException | IOException e) { + responseObserver.onError(e); + } + } + + @Override + public void onFailure(Exception e) { + logger.error("SearchRequestActionListener failed to process search request:" + e.getMessage()); + responseObserver.onError(e); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtils.java index d24e62ed09d99..1c289bff3235d 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtils.java @@ -34,8 +34,8 @@ private FetchSourceContextProtoUtils() { * Converts a SourceConfig Protocol Buffer to a FetchSourceContext object. 
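For orientation, the listener above is meant to be handed to the asynchronous search call so that the protobuf conversion happens on completion. A minimal wiring sketch, assuming a Client and a gRPC StreamObserver are already in hand; the class and method names here are illustrative, not part of this change:

    import org.opensearch.action.search.SearchRequest;
    import org.opensearch.plugin.transport.grpc.listeners.SearchRequestActionListener;
    import org.opensearch.transport.client.Client;

    import io.grpc.stub.StreamObserver;

    class SearchExecutionSketch {
        // Illustrative only: run the search asynchronously and let the listener
        // convert the OpenSearch SearchResponse into its protobuf form for the gRPC stream.
        static void executeSearch(
            Client client,
            SearchRequest searchRequest,
            StreamObserver<org.opensearch.protobufs.SearchResponse> responseObserver
        ) {
            client.search(searchRequest, new SearchRequestActionListener(responseObserver));
        }
    }
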
* Similar to {@link FetchSourceContext#parseFromRestRequest(RestRequest)} * - * @param request - * @return + * @param request The BulkRequest Protocol Buffer containing source configuration + * @return A FetchSourceContext object based on the request parameters, or null if no source parameters are provided */ public static FetchSourceContext parseFromProtoRequest(org.opensearch.protobufs.BulkRequest request) { Boolean fetchSource = true; @@ -69,6 +69,42 @@ public static FetchSourceContext parseFromProtoRequest(org.opensearch.protobufs. return null; } + /** + * Converts a SourceConfig Protocol Buffer to a FetchSourceContext object. + * Similar to {@link FetchSourceContext#parseFromRestRequest(RestRequest)} + * + * @param request The SearchRequest Protocol Buffer containing source configuration + * @return A FetchSourceContext object based on the request parameters, or null if no source parameters are provided + */ + public static FetchSourceContext parseFromProtoRequest(org.opensearch.protobufs.SearchRequest request) { + Boolean fetchSource = null; + String[] sourceExcludes = null; + String[] sourceIncludes = null; + + if (request.hasSource()) { + SourceConfigParam source = request.getSource(); + + if (source.hasBoolValue()) { + fetchSource = source.getBoolValue(); + } else { + sourceIncludes = source.getStringArray().getStringArrayList().toArray(new String[0]); + } + } + + if (request.getSourceIncludesCount() > 0) { + sourceIncludes = request.getSourceIncludesList().toArray(new String[0]); + } + + if (request.getSourceExcludesCount() > 0) { + sourceExcludes = request.getSourceExcludesList().toArray(new String[0]); + } + + if (fetchSource != null || sourceIncludes != null || sourceExcludes != null) { + return new FetchSourceContext(fetchSource == null ? true : fetchSource, sourceIncludes, sourceExcludes); + } + return null; + } + /** * Converts a SourceConfig Protocol Buffer to a FetchSourceContext object. * Similar to {@link FetchSourceContext#fromXContent(XContentParser)}. 
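The net effect of the new SearchRequest overload above is an ordinary FetchSourceContext. A small sketch of the equivalent object for a request carrying source_includes=["user.*"] and source_excludes=["user.password"] with no explicit _source boolean, so fetching defaults to true; the field patterns are illustrative:

    import org.opensearch.search.fetch.subphase.FetchSourceContext;

    class FetchSourceSketch {
        // Equivalent FetchSourceContext for a proto SearchRequest with
        // source_includes=["user.*"] and source_excludes=["user.password"]
        // and no explicit _source boolean.
        static FetchSourceContext example() {
            return new FetchSourceContext(true, new String[] { "user.*" }, new String[] { "user.password" });
        }
    }
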
@@ -96,7 +132,8 @@ public static FetchSourceContext fromProto(SourceConfig sourceConfig) { includesList.add(s); } includes = includesList.toArray(new String[0]); - } else if (!sourceFilter.getExcludesList().isEmpty()) { + } + if (!sourceFilter.getExcludesList().isEmpty()) { List excludesList = new ArrayList<>(); for (String s : sourceFilter.getExcludesList()) { excludesList.add(s); diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java index 4fd8288fd8d63..8efc057ec4cf1 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java @@ -49,7 +49,7 @@ public static Map fromProto(ObjectMap objectMap) { * @param value The generic protobuf ObjectMap.Value to convert * @return A Protobuf builder .google.protobuf.Struct representation */ - private static Object fromProto(ObjectMap.Value value) { + public static Object fromProto(ObjectMap.Value value) { if (value.hasNullValue()) { // Null throw new UnsupportedOperationException("Cannot add null value in ObjectMap.value " + value.toString() + " to a Java map."); diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtils.java index 408b548fa6fbf..f8d7abc8effbf 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtils.java @@ -48,6 +48,10 @@ public static Script parseFromProtoRequest(org.opensearch.protobufs.Script scrip /** * Converts a Script Protocol Buffer to a Script object. * Similar to {@link Script#parse(XContentParser, String)}, which internally calls Script#build(). 
+ * + * @param script the Protocol Buffer Script to convert + * @param defaultLang the default script language to use if not specified + * @return the converted Script object */ private static Script parseFromProtoRequest(org.opensearch.protobufs.Script script, String defaultLang) { Objects.requireNonNull(defaultLang); @@ -63,8 +67,12 @@ private static Script parseFromProtoRequest(org.opensearch.protobufs.Script scri /** * Parses a protobuf InlineScript to a Script object + * + * @param inlineScript the Protocol Buffer InlineScript to convert + * @param defaultLang the default script language to use if not specified + * @return the converted Script object */ - private static Script parseInlineScript(InlineScript inlineScript, String defaultLang) { + public static Script parseInlineScript(InlineScript inlineScript, String defaultLang) { ScriptType type = ScriptType.INLINE; @@ -85,8 +93,11 @@ private static Script parseInlineScript(InlineScript inlineScript, String defaul /** * Parses a protobuf StoredScriptId to a Script object + * + * @param storedScriptId the Protocol Buffer StoredScriptId to convert + * @return the converted Script object */ - private static Script parseStoredScriptId(StoredScriptId storedScriptId) { + public static Script parseStoredScriptId(StoredScriptId storedScriptId) { ScriptType type = ScriptType.STORED; String lang = null; String idOrCode = storedScriptId.getId(); @@ -98,7 +109,15 @@ private static Script parseStoredScriptId(StoredScriptId storedScriptId) { return new Script(type, lang, idOrCode, options, params); } - private static String parseScriptLanguage(ScriptLanguage language, String defaultLang) { + /** + * Parses a protobuf ScriptLanguage to a String representation + * + * @param language the Protocol Buffer ScriptLanguage to convert + * @param defaultLang the default script language to use if not specified + * @return the string representation of the script language + * @throws UnsupportedOperationException if no language was specified + */ + public static String parseScriptLanguage(ScriptLanguage language, String defaultLang) { if (language.hasStringValue()) { return language.getStringValue(); } @@ -113,7 +132,7 @@ private static String parseScriptLanguage(ScriptLanguage language, String defaul return "painless"; case BUILTIN_SCRIPT_LANGUAGE_UNSPECIFIED: default: - throw new UnsupportedOperationException("no language was specified"); + return defaultLang; } } } diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java index 56bc53ccbc422..df337be9f5e84 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java @@ -13,7 +13,9 @@ import org.opensearch.protobufs.WaitForActiveShards; /** - * Handler for bulk requests in gRPC. + * Utility class for handling active shard count settings in gRPC bulk requests. + * This class provides methods to convert between Protocol Buffer representations + * and OpenSearch ActiveShardCount objects. 
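With the parseScriptLanguage change above, an inline script whose language is left unspecified no longer fails; it resolves to the caller-supplied default. Assuming the caller passes Script.DEFAULT_SCRIPT_LANG, as Script.parse does, the parsed result is equivalent to this direct construction; the script source is illustrative:

    import java.util.Collections;

    import org.opensearch.script.Script;
    import org.opensearch.script.ScriptType;

    class ScriptFallbackSketch {
        // An InlineScript with no ScriptLanguage set now falls back to the
        // supplied default language instead of throwing.
        static Script example() {
            return new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "doc['rating'].value * 2", Collections.emptyMap());
        }
    }
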
*/ public class ActiveShardCountProtoUtils { // protected final Settings settings; @@ -27,11 +29,13 @@ protected ActiveShardCountProtoUtils() { /** * Sets the active shard count on the bulk request based on the protobuf request. - * Similar to {@link ActiveShardCount#parseString(String)} + * Similar to {@link ActiveShardCount#parseString(String)}, this method interprets + * the wait_for_active_shards parameter from the Protocol Buffer request and applies + * the appropriate ActiveShardCount setting to the OpenSearch bulk request. * - * @param bulkRequest The bulk request to modify - * @param request The protobuf request containing the active shard count - * @return The modified bulk request + * @param bulkRequest The OpenSearch bulk request to modify + * @param request The Protocol Buffer request containing the active shard count settings + * @return The modified OpenSearch bulk request with updated active shard count settings */ public static org.opensearch.action.bulk.BulkRequest getActiveShardCount( org.opensearch.action.bulk.BulkRequest bulkRequest, diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/CollapseBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/CollapseBuilderProtoUtils.java new file mode 100644 index 0000000000000..adc43cef9294a --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/CollapseBuilderProtoUtils.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.protobufs.FieldCollapse; +import org.opensearch.search.collapse.CollapseBuilder; + +import java.io.IOException; + +/** + * Utility class for converting CollapseBuilder Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of field collapse + * specifications into their corresponding OpenSearch CollapseBuilder implementations for + * search result field collapsing and grouping. + */ +public class CollapseBuilderProtoUtils { + + private CollapseBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer FieldCollapse to an OpenSearch CollapseBuilder. + * Similar to {@link CollapseBuilder#fromXContent(XContentParser)}, this method + * parses the Protocol Buffer representation and creates a properly configured + * CollapseBuilder with the appropriate field, max concurrent group searches, + * and inner hits settings. 
+ * + * @param collapseProto The Protocol Buffer FieldCollapse to convert + * @return A configured CollapseBuilder instance + * @throws IOException if there's an error during parsing or conversion + */ + protected static CollapseBuilder fromProto(FieldCollapse collapseProto) throws IOException { + CollapseBuilder collapseBuilder = new CollapseBuilder(collapseProto.getField()); + + if (collapseProto.hasMaxConcurrentGroupSearches()) { + collapseBuilder.setMaxConcurrentGroupRequests(collapseProto.getMaxConcurrentGroupSearches()); + } + if (collapseProto.getInnerHitsCount() > 0) { + collapseBuilder.setInnerHits(InnerHitsBuilderProtoUtils.fromProto(collapseProto.getInnerHitsList())); + } + + return collapseBuilder; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/FieldAndFormatProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/FieldAndFormatProtoUtils.java new file mode 100644 index 0000000000000..e009537e40179 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/FieldAndFormatProtoUtils.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.search.fetch.subphase.FieldAndFormat; + +/** + * Utility class for converting FieldAndFormat Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of field and format + * specifications into their corresponding OpenSearch FieldAndFormat implementations for search operations. + */ +public class FieldAndFormatProtoUtils { + + private FieldAndFormatProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer FieldAndFormat to an OpenSearch FieldAndFormat object. + * Similar to {@link FieldAndFormat#fromXContent(XContentParser)}, this method + * parses the Protocol Buffer representation and creates a properly configured + * FieldAndFormat with the appropriate field name and format settings. + * + * @param fieldAndFormatProto The Protocol Buffer FieldAndFormat to convert + * @return A configured FieldAndFormat instance + */ + protected static FieldAndFormat fromProto(org.opensearch.protobufs.FieldAndFormat fieldAndFormatProto) { + + // TODO how is this field used? + // fieldAndFormatProto.getIncludeUnmapped(); + return new FieldAndFormat(fieldAndFormatProto.getField(), fieldAndFormatProto.getFormat()); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/HighlightBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/HighlightBuilderProtoUtils.java new file mode 100644 index 0000000000000..f12b55db8870c --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/HighlightBuilderProtoUtils.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
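The collapse conversion above produces the same builder one would construct by hand. A minimal sketch, with an illustrative field name and group-search limit:

    import org.opensearch.search.collapse.CollapseBuilder;

    class CollapseSketch {
        // Equivalent of a FieldCollapse proto with field "user.id" and
        // max_concurrent_group_searches = 4.
        static CollapseBuilder example() {
            return new CollapseBuilder("user.id").setMaxConcurrentGroupRequests(4);
        }
    }
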
+ */ +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.protobufs.Highlight; +import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; + +/** + * Utility class for converting Highlight Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of highlights + * into their corresponding OpenSearch HighlightBuilder implementations for search result highlighting. + */ +public class HighlightBuilderProtoUtils { + + private HighlightBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer Highlight to an OpenSearch HighlightBuilder. + * Similar to {@link HighlightBuilder#fromXContent(XContentParser)}, this method + * would parse the Protocol Buffer representation and create a properly configured + * HighlightBuilder with the appropriate settings. + * + * @param highlightProto The Protocol Buffer Highlight to convert + * @return A configured HighlightBuilder instance + * @throws UnsupportedOperationException as highlight functionality is not yet implemented + */ + protected static HighlightBuilder fromProto(Highlight highlightProto) { + + throw new UnsupportedOperationException("highlight not supported yet"); + + /* + HighlightBuilder highlightBuilder = new HighlightBuilder(); + // TODO populate highlightBuilder + return highlightBuilder; + */ + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/IndicesOptionsProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/IndicesOptionsProtoUtils.java new file mode 100644 index 0000000000000..acf277bf7015d --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/IndicesOptionsProtoUtils.java @@ -0,0 +1,134 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.protobufs.SearchRequest; +import org.opensearch.rest.RestRequest; + +import java.util.EnumSet; +import java.util.List; + +import static org.opensearch.action.support.IndicesOptions.WildcardStates.CLOSED; +import static org.opensearch.action.support.IndicesOptions.WildcardStates.HIDDEN; +import static org.opensearch.action.support.IndicesOptions.WildcardStates.OPEN; +import static org.opensearch.action.support.IndicesOptions.fromOptions; + +/** + * Utility class for converting IndicesOptions between OpenSearch and Protocol Buffers formats. + * This class provides methods to extract and transform indices options from Protocol Buffer requests + * to ensure proper handling of index wildcards, unavailable indices, and other index-related settings. + */ +public class IndicesOptionsProtoUtils { + + private IndicesOptionsProtoUtils() { + // Utility class, no instances + } + + /** + * Extracts indices options from a Protocol Buffer SearchRequest. 
+ * Similar to {@link IndicesOptions#fromRequest(RestRequest, IndicesOptions)} + * + * @param request the Protocol Buffer SearchRequest to extract options from + * @param defaultSettings the default IndicesOptions to use if not specified in the request + * @return the IndicesOptions based on the request parameters + */ + protected static IndicesOptions fromRequest(org.opensearch.protobufs.SearchRequest request, IndicesOptions defaultSettings) { + return fromProtoParameters(request, defaultSettings); + } + + /** + * Creates IndicesOptions from Protocol Buffer SearchRequest parameters. + * Similar to {@link IndicesOptions#fromParameters(Object, Object, Object, Object, IndicesOptions)} + * + * @param request the Protocol Buffer SearchRequest to extract parameters from + * @param defaultSettings the default IndicesOptions to use if not specified in the request + * @return the IndicesOptions based on the request parameters + */ + protected static IndicesOptions fromProtoParameters(SearchRequest request, IndicesOptions defaultSettings) { + if (!(request.getExpandWildcardsCount() > 0) + && !request.hasIgnoreUnavailable() + && !request.hasAllowNoIndices() + && !request.hasIgnoreThrottled()) { + return defaultSettings; + } + + // TODO double check this works + EnumSet wildcards = parseProtoParameter( + request.getExpandWildcardsList(), + defaultSettings.getExpandWildcards() + ); + + // note that allowAliasesToMultipleIndices is not exposed, always true (only for internal use) + return fromOptions( + request.hasIgnoreUnavailable() ? request.getIgnoreUnavailable() : defaultSettings.ignoreUnavailable(), + request.hasAllowNoIndices() ? request.getAllowNoIndices() : defaultSettings.allowNoIndices(), + wildcards.contains(OPEN), + wildcards.contains(CLOSED), + wildcards.contains(IndicesOptions.WildcardStates.HIDDEN), + defaultSettings.allowAliasesToMultipleIndices(), + defaultSettings.forbidClosedIndices(), + defaultSettings.ignoreAliases(), + request.hasIgnoreThrottled() ? request.getIgnoreThrottled() : defaultSettings.ignoreThrottled() + ); + } + + /** + * Parses a list of ExpandWildcard values into an EnumSet of WildcardStates. + * Similar to {@link IndicesOptions.WildcardStates#parseParameter(Object, EnumSet)} + * + * @param wildcardList the list of ExpandWildcard values to parse + * @param defaultStates the default WildcardStates to use if the list is empty + * @return an EnumSet of WildcardStates based on the provided wildcardList + */ + protected static EnumSet parseProtoParameter( + List wildcardList, + EnumSet defaultStates + ) { + if (wildcardList.isEmpty()) { + return defaultStates; + } + + EnumSet states = EnumSet.noneOf(IndicesOptions.WildcardStates.class); + for (SearchRequest.ExpandWildcard wildcard : wildcardList) { + updateSetForValue(states, wildcard); + } + + return states; + } + + /** + * Updates an EnumSet of WildcardStates based on the provided ExpandWildcard value. 
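To make the wildcard handling above concrete: a request with expand_wildcards=[open, hidden] and ignore_unavailable=true maps to roughly the following IndicesOptions, using the same fromOptions overload the converter calls. The remaining flags shown are illustrative stand-ins for whatever defaultSettings carries:

    import org.opensearch.action.support.IndicesOptions;

    class IndicesOptionsSketch {
        // Rough equivalent of expand_wildcards=[EXPAND_WILDCARD_OPEN, EXPAND_WILDCARD_HIDDEN]
        // with ignore_unavailable=true; other flags are placeholders for the defaults.
        static IndicesOptions example() {
            return IndicesOptions.fromOptions(
                true,   // ignoreUnavailable
                true,   // allowNoIndices
                true,   // expandToOpenIndices
                false,  // expandToClosedIndices
                true,   // expandToHiddenIndices
                true,   // allowAliasesToMultipleIndices
                false,  // forbidClosedIndices
                false,  // ignoreAliases
                false   // ignoreThrottled
            );
        }
    }
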
+ * Keep implementation consistent with {@link IndicesOptions.WildcardStates#updateSetForValue(EnumSet, String)} + * + * @param states the EnumSet of WildcardStates to update + * @param wildcard the ExpandWildcard value to use for updating the states + */ + protected static void updateSetForValue(EnumSet states, SearchRequest.ExpandWildcard wildcard) { + switch (wildcard) { + case EXPAND_WILDCARD_OPEN: + states.add(OPEN); + break; + case EXPAND_WILDCARD_CLOSED: + states.add(CLOSED); + break; + case EXPAND_WILDCARD_HIDDEN: + states.add(HIDDEN); + break; + case EXPAND_WILDCARD_NONE: + states.clear(); + break; + case EXPAND_WILDCARD_ALL: + states.addAll(EnumSet.allOf(IndicesOptions.WildcardStates.class)); + break; + default: + throw new IllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/InnerHitsBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/InnerHitsBuilderProtoUtils.java new file mode 100644 index 0000000000000..5adbd1be99948 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/InnerHitsBuilderProtoUtils.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.query.InnerHitBuilder; +import org.opensearch.plugin.transport.grpc.proto.request.common.FetchSourceContextProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.request.search.sort.SortBuilderProtoUtils; +import org.opensearch.protobufs.InnerHits; +import org.opensearch.protobufs.ScriptField; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.fetch.subphase.FieldAndFormat; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * Utility class for converting SearchSourceBuilder Protocol Buffers to objects + * + */ +public class InnerHitsBuilderProtoUtils { + + private InnerHitsBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Similar to {@link InnerHitBuilder#fromXContent(XContentParser)} + * + * @param innerHits + * @throws IOException if there's an error during parsing + */ + protected static InnerHitBuilder fromProto(List innerHits) throws IOException { + InnerHitBuilder innerHitBuilder = new InnerHitBuilder(); + + for (InnerHits innerHit : innerHits) { + if (innerHit.hasName()) { + innerHitBuilder.setName(innerHit.getName()); + } + if (innerHit.hasIgnoreUnmapped()) { + innerHitBuilder.setIgnoreUnmapped(innerHit.getIgnoreUnmapped()); + } + if (innerHit.hasFrom()) { + innerHitBuilder.setFrom(innerHit.getFrom()); + } + if (innerHit.hasSize()) { + innerHitBuilder.setSize(innerHit.getSize()); + } + if (innerHit.hasExplain()) { + innerHitBuilder.setExplain(innerHit.getExplain()); + } + if (innerHit.hasVersion()) { + innerHitBuilder.setVersion(innerHit.getVersion()); + } + if (innerHit.hasSeqNoPrimaryTerm()) { + innerHitBuilder.setSeqNoAndPrimaryTerm(innerHit.getSeqNoPrimaryTerm()); + } + if (innerHit.hasTrackScores()) { + 
innerHitBuilder.setTrackScores(innerHit.getTrackScores()); + } + if (innerHit.getStoredFieldsCount() > 0) { + innerHitBuilder.setStoredFieldNames(innerHit.getStoredFieldsList()); + } + if (innerHit.getDocvalueFieldsCount() > 0) { + List fieldAndFormatList = new ArrayList<>(); + for (org.opensearch.protobufs.FieldAndFormat fieldAndFormat : innerHit.getDocvalueFieldsList()) { + fieldAndFormatList.add(FieldAndFormatProtoUtils.fromProto(fieldAndFormat)); + } + innerHitBuilder.setDocValueFields(fieldAndFormatList); + } + if (innerHit.getFieldsCount() > 0) { + List fieldAndFormatList = new ArrayList<>(); + for (org.opensearch.protobufs.FieldAndFormat fieldAndFormat : innerHit.getFieldsList()) { + fieldAndFormatList.add(FieldAndFormatProtoUtils.fromProto(fieldAndFormat)); + } + innerHitBuilder.setFetchFields(fieldAndFormatList); + } + if (innerHit.getScriptFieldsCount() > 0) { + Set scriptFields = new HashSet<>(); + for (Map.Entry entry : innerHit.getScriptFieldsMap().entrySet()) { + String name = entry.getKey(); + ScriptField scriptFieldProto = entry.getValue(); + SearchSourceBuilder.ScriptField scriptField = SearchSourceBuilderProtoUtils.ScriptFieldProtoUtils.fromProto( + name, + scriptFieldProto + ); + scriptFields.add(scriptField); + } + innerHitBuilder.setScriptFields(scriptFields); + } + if (innerHit.getSortCount() > 0) { + innerHitBuilder.setSorts(SortBuilderProtoUtils.fromProto(innerHit.getSortList())); + } + if (innerHit.hasSource()) { + innerHitBuilder.setFetchSourceContext(FetchSourceContextProtoUtils.fromProto(innerHit.getSource())); + } + if (innerHit.hasHighlight()) { + innerHitBuilder.setHighlightBuilder(HighlightBuilderProtoUtils.fromProto(innerHit.getHighlight())); + } + if (innerHit.hasCollapse()) { + innerHitBuilder.setInnerCollapse(CollapseBuilderProtoUtils.fromProto(innerHit.getCollapse())); + } + } + return innerHitBuilder; + } + +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/OperatorProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/OperatorProtoUtils.java new file mode 100644 index 0000000000000..8a71113201769 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/OperatorProtoUtils.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.index.query.Operator; + +/** + * Utility class for converting Operator enums between OpenSearch and Protocol Buffers formats. + * This class provides methods to transform operator values to ensure proper query behavior + * in search operations. 
+ */ +public class OperatorProtoUtils { + private OperatorProtoUtils() { + // Utility class, no instances + } + + /** + * Similar to {@link Operator#fromString(String)} + * + * @param op + * @return + */ + protected static Operator fromEnum(org.opensearch.protobufs.SearchRequest.Operator op) { + switch (op) { + case OPERATOR_AND: + return Operator.AND; + case OPERATOR_OR: + return Operator.OR; + default: + throw Operator.newOperatorException(op.toString()); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/PointInTimeBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/PointInTimeBuilderProtoUtils.java new file mode 100644 index 0000000000000..254f196b65bc9 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/PointInTimeBuilderProtoUtils.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.protobufs.PointInTimeReference; +import org.opensearch.search.builder.PointInTimeBuilder; + +import static org.opensearch.search.builder.SearchSourceBuilder.POINT_IN_TIME; + +/** + * Utility class for converting PointInTimeBuilder Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of point-in-time + * references into their corresponding OpenSearch PointInTimeBuilder implementations for + * search operations with consistent snapshots. + */ +public class PointInTimeBuilderProtoUtils { + + private PointInTimeBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer PointInTimeReference to an OpenSearch PointInTimeBuilder. + * Similar to {@link PointInTimeBuilder#fromXContent(XContentParser)}, this method + * parses the Protocol Buffer representation and creates a properly configured + * PointInTimeBuilder with the appropriate ID and keep-alive settings. + * + * @param pointInTime The Protocol Buffer PointInTimeReference to convert + * @return A configured PointInTimeBuilder instance + */ + protected static PointInTimeBuilder fromProto(PointInTimeReference pointInTime) { + + TimeValue keepAlive = TimeValue.parseTimeValue(pointInTime.getKeepAlive(), null, POINT_IN_TIME.getPreferredName()); + return new PointInTimeBuilder(pointInTime.getId()).setKeepAlive(keepAlive); + } + +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/ProtoActionsProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/ProtoActionsProtoUtils.java new file mode 100644 index 0000000000000..45f5367d1fe05 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/ProtoActionsProtoUtils.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryStringQueryBuilder; +import org.opensearch.protobufs.SearchRequest; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestActions; + +/** + * Utility class for converting REST-like actions between OpenSearch and Protocol Buffers formats. + * This class provides methods to transform URL parameters from Protocol Buffer requests into + * query builders and other OpenSearch constructs. + */ +public class ProtoActionsProtoUtils { + + private ProtoActionsProtoUtils() { + // Utility class, no instances + } + + /** + * Similar to {@link RestActions#urlParamsToQueryBuilder(RestRequest)} + * + * @param request + * @return + */ + protected static QueryBuilder urlParamsToQueryBuilder(SearchRequest request) { + if (!request.hasQ()) { + return null; + } + + QueryStringQueryBuilder queryBuilder = QueryBuilders.queryStringQuery(request.getQ()); + queryBuilder.defaultField(request.hasDf() ? request.getDf() : null); + queryBuilder.analyzer(request.hasAnalyzer() ? request.getAnalyzer() : null); + queryBuilder.analyzeWildcard(request.hasAnalyzeWildcard() ? request.getAnalyzeWildcard() : false); + queryBuilder.lenient(request.hasLenient() ? request.getLenient() : null); + if (request.hasDefaultOperator()) { + queryBuilder.defaultOperator(OperatorProtoUtils.fromEnum(request.getDefaultOperator())); + } + return queryBuilder; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/RescorerBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/RescorerBuilderProtoUtils.java new file mode 100644 index 0000000000000..38f22f05a94e9 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/RescorerBuilderProtoUtils.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.protobufs.Rescore; +import org.opensearch.search.rescore.RescorerBuilder; + +/** + * Utility class for converting Rescore Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of rescorers + * into their corresponding OpenSearch RescorerBuilder implementations for search result rescoring. + */ +public class RescorerBuilderProtoUtils { + + private RescorerBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer Rescore to an OpenSearch RescorerBuilder. + * Similar to {@link RescorerBuilder#parseFromXContent(XContentParser)}, this method + * would parse the Protocol Buffer representation and create a properly configured + * RescorerBuilder with the appropriate settings. 
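urlParamsToQueryBuilder above mirrors the REST handling of the q parameter. A sketch of the builder produced for q="title:opensearch" with default_operator=AND; the query text is illustrative:

    import org.opensearch.index.query.Operator;
    import org.opensearch.index.query.QueryBuilders;
    import org.opensearch.index.query.QueryStringQueryBuilder;

    class UrlParamQuerySketch {
        // Equivalent of a proto SearchRequest with q = "title:opensearch"
        // and default_operator = OPERATOR_AND.
        static QueryStringQueryBuilder example() {
            return QueryBuilders.queryStringQuery("title:opensearch").defaultOperator(Operator.AND);
        }
    }
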
+ * + * @param rescoreProto The Protocol Buffer Rescore to convert + * @return A configured RescorerBuilder instance + * @throws UnsupportedOperationException as rescore functionality is not yet implemented + */ + protected static RescorerBuilder parseFromProto(Rescore rescoreProto) { + throw new UnsupportedOperationException("rescore is not supported yet"); + /* + RescorerBuilder rescorer = null; + // TODO populate rescorerBuilder + + return rescorer; + + */ + } + +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchAfterBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchAfterBuilderProtoUtils.java new file mode 100644 index 0000000000000..4e58ca7527db5 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchAfterBuilderProtoUtils.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.protobufs.FieldValue; +import org.opensearch.protobufs.GeneralNumber; +import org.opensearch.search.searchafter.SearchAfterBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * Utility class for converting SearchAfterBuilder Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of search_after + * values into their corresponding OpenSearch object arrays for pagination in search operations. + */ +public class SearchAfterBuilderProtoUtils { + + private SearchAfterBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a list of Protocol Buffer FieldValue objects to an array of Java objects + * that can be used for search_after pagination. + * Similar to {@link SearchAfterBuilder#fromXContent(XContentParser)}, this method + * parses the Protocol Buffer representation and creates an array of values + * that can be used for search_after pagination. 
+ * + * @param searchAfterProto The list of Protocol Buffer FieldValue objects to convert + * @return An array of Java objects representing the search_after values + * @throws IOException if there's an error during parsing or conversion + */ + protected static Object[] fromProto(List searchAfterProto) throws IOException { + List values = new ArrayList<>(); + + for (FieldValue fieldValue : searchAfterProto) { + if (fieldValue.hasGeneralNumber()) { + GeneralNumber generalNumber = fieldValue.getGeneralNumber(); + if (generalNumber.hasInt32Value()) { + values.add(generalNumber.getInt32Value()); + } else if (generalNumber.hasInt64Value()) { + values.add(generalNumber.getInt64Value()); + } else if (generalNumber.hasDoubleValue()) { + values.add(generalNumber.getDoubleValue()); + } else if (generalNumber.hasFloatValue()) { + values.add(generalNumber.getFloatValue()); + } + } else if (fieldValue.hasStringValue()) { + values.add(fieldValue.getStringValue()); + } else if (fieldValue.hasBoolValue()) { + values.add(fieldValue.getBoolValue()); + } + // TODO missing null value + // else if(fieldValue.hasNullValue ()){ + // values.add(fieldValue.getNullValue()); + // } + } + return values.toArray(); + } + +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchRequestProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchRequestProtoUtils.java new file mode 100644 index 0000000000000..d8765d59d604c --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchRequestProtoUtils.java @@ -0,0 +1,389 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
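The Object[] returned by the search_after conversion above plugs directly into SearchSourceBuilder#searchAfter. A small usage sketch with stand-in sort values:

    import org.opensearch.search.builder.SearchSourceBuilder;

    class SearchAfterSketch {
        // The converted array stands in for the previous page's last sort values.
        static SearchSourceBuilder example() {
            return new SearchSourceBuilder()
                .sort("timestamp")
                .sort("_id")
                .searchAfter(new Object[] { 1713312000000L, "doc#128" });
        }
    }
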
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.search.SearchContextId; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.plugin.transport.grpc.proto.request.common.FetchSourceContextProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.request.search.suggest.TermSuggestionBuilderProtoUtils; +import org.opensearch.protobufs.SearchRequest; +import org.opensearch.protobufs.SearchRequestBody; +import org.opensearch.protobufs.TrackHits; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestSearchAction; +import org.opensearch.search.Scroll; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.fetch.StoredFieldsContext; +import org.opensearch.search.fetch.subphase.FetchSourceContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.search.suggest.SuggestBuilder; +import org.opensearch.transport.client.Client; +import org.opensearch.transport.client.node.NodeClient; + +import java.io.IOException; +import java.util.function.IntConsumer; + +import static org.opensearch.action.ValidateActions.addValidationError; +import static org.opensearch.common.unit.TimeValue.parseTimeValue; +import static org.opensearch.search.suggest.SuggestBuilders.termSuggestion; + +/** + * Utility class for converting SearchRequest objects between OpenSearch and Protocol Buffers formats. + * This class provides methods to prepare, parse, and transform search requests to ensure proper + * communication between gRPC clients and the OpenSearch server. + */ +public class SearchRequestProtoUtils { + + private SearchRequestProtoUtils() { + // Utility class, no instances + } + + /** + * Prepare the request for execution. + *

+ * Similar to {@link RestSearchAction#prepareRequest(RestRequest, NodeClient)} + * Please ensure to keep both implementations consistent. + * + * @param request the Protocol Buffer SearchRequest to execute + * @param client the client to use for execution + * @return the SearchRequest to execute + * @throws IOException if an I/O exception occurred parsing the request and preparing for + * execution + */ + public static org.opensearch.action.search.SearchRequest prepareRequest(SearchRequest request, Client client) throws IOException { + org.opensearch.action.search.SearchRequest searchRequest = new org.opensearch.action.search.SearchRequest(); + + /* + * We have to pull out the call to `source().size(size)` because + * _update_by_query and _delete_by_query uses this same parsing + * path but sets a different variable when it sees the `size` + * url parameter. + * + * Note that we can't use `searchRequest.source()::size` because + * `searchRequest.source()` is null right now. We don't have to + * guard against it being null in the IntConsumer because it can't + * be null later. If that is confusing to you then you are in good + * company. + */ + IntConsumer setSize = size -> searchRequest.source().size(size); + // TODO avoid hidden cast to NodeClient here + parseSearchRequest(searchRequest, request, ((NodeClient) client).getNamedWriteableRegistry(), setSize); + return searchRequest; + } + + /** + * Parses a protobuf {@link org.opensearch.protobufs.SearchRequest} to a {@link org.opensearch.action.search.SearchRequest}. + * This method is similar to the logic in {@link RestSearchAction#parseSearchRequest(org.opensearch.action.search.SearchRequest, RestRequest, XContentParser, NamedWriteableRegistry, IntConsumer)} + * Specifically, this method handles the URL parameters, and internally calls {@link SearchSourceBuilderProtoUtils#parseProto(SearchSourceBuilder, SearchRequestBody)} + * + * @param searchRequest the SearchRequest to populate + * @param request the Protocol Buffer SearchRequest to parse + * @param namedWriteableRegistry the registry for named writeables + * @param setSize consumer for setting the size parameter + * @throws IOException if an I/O exception occurred during parsing + */ + protected static void parseSearchRequest( + org.opensearch.action.search.SearchRequest searchRequest, + org.opensearch.protobufs.SearchRequest request, + NamedWriteableRegistry namedWriteableRegistry, + IntConsumer setSize + ) throws IOException { + if (searchRequest.source() == null) { + searchRequest.source(new SearchSourceBuilder()); + } + + String[] indexArr = new String[request.getIndexCount()]; + for (int i = 0; i < request.getIndexCount(); i++) { + indexArr[i] = request.getIndex(i); + } + searchRequest.indices(indexArr); + + SearchSourceBuilderProtoUtils.parseProto(searchRequest.source(), request.getRequestBody()); + + final int batchedReduceSize = request.hasBatchedReduceSize() + ? 
request.getBatchedReduceSize() + : searchRequest.getBatchedReduceSize(); + searchRequest.setBatchedReduceSize(batchedReduceSize); + + if (request.hasPreFilterShardSize()) { + searchRequest.setPreFilterShardSize(request.getPreFilterShardSize()); + } + + if (request.hasMaxConcurrentShardRequests()) { + // only set if we have the parameter since we auto adjust the max concurrency on the coordinator + // based on the number of nodes in the cluster + searchRequest.setMaxConcurrentShardRequests(request.getMaxConcurrentShardRequests()); + } + + if (request.hasAllowPartialSearchResults()) { + // only set if we have the parameter passed to override the cluster-level default + searchRequest.allowPartialSearchResults(request.getAllowPartialSearchResults()); + } + if (request.hasPhaseTook()) { + // only set if we have the parameter passed to override the cluster-level default + // else phaseTook = null + searchRequest.setPhaseTook(request.getPhaseTook()); + } + // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types + // from the REST layer. these modes are an internal optimization and should + // not be specified explicitly by the user. + if (request.hasSearchType()) { + searchRequest.searchType(SearchTypeProtoUtils.fromProto(request)); + } + parseSearchSource(searchRequest.source(), request, setSize); + + if (request.hasRequestCache()) { + searchRequest.requestCache(request.getRequestCache()); + } + + if (request.hasScroll()) { + searchRequest.scroll(new Scroll(parseTimeValue(request.getScroll(), null, "scroll"))); + } + + if (request.getRoutingCount() > 0) { + // Pass to {@link SearchRequest#routing(String ... routings)} + searchRequest.routing(request.getRoutingList().toArray(new String[0])); + } else { + // Pass to {@link SearchRequest#routing(String routing)} + searchRequest.routing((String) null); + } + searchRequest.preference(request.hasPreference() ? request.getPreference() : null); + searchRequest.indicesOptions(IndicesOptionsProtoUtils.fromRequest(request, searchRequest.indicesOptions())); + searchRequest.pipeline(request.hasSearchPipeline() ? request.getSearchPipeline() : searchRequest.source().pipeline()); + + checkProtoTotalHits(request, searchRequest); + + // TODO what does this line do? + // request.paramAsBoolean(INCLUDE_NAMED_QUERIES_SCORE_PARAM, false); + + if (searchRequest.pointInTimeBuilder() != null) { + preparePointInTime(searchRequest, request, namedWriteableRegistry); + } else { + searchRequest.setCcsMinimizeRoundtrips( + request.hasCcsMinimizeRoundtrips() ? request.getCcsMinimizeRoundtrips() : searchRequest.isCcsMinimizeRoundtrips() + ); + } + + searchRequest.setCancelAfterTimeInterval( + request.hasCancelAfterTimeInterval() + ? parseTimeValue(request.getCancelAfterTimeInterval(), null, "cancel_after_time_interval") + : null + ); + } + + /** + * Parses the search source from a Protocol Buffer SearchRequest. 
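Taken together, the pieces above suggest the overall gRPC search flow: translate the protobuf request, then execute it with the gRPC-aware listener from earlier. A rough end-to-end sketch, assuming the service hands in a NodeClient-backed Client; the actual SearchServiceImpl wiring may differ:

    import java.io.IOException;

    import org.opensearch.plugin.transport.grpc.listeners.SearchRequestActionListener;
    import org.opensearch.plugin.transport.grpc.proto.request.search.SearchRequestProtoUtils;
    import org.opensearch.transport.client.Client;

    import io.grpc.stub.StreamObserver;

    class GrpcSearchSketch {
        // Illustrative flow: convert the protobuf SearchRequest into an OpenSearch
        // SearchRequest, then run it asynchronously and stream the protobuf response back.
        static void handle(
            Client client,
            org.opensearch.protobufs.SearchRequest protoRequest,
            StreamObserver<org.opensearch.protobufs.SearchResponse> responseObserver
        ) throws IOException {
            org.opensearch.action.search.SearchRequest searchRequest = SearchRequestProtoUtils.prepareRequest(protoRequest, client);
            client.search(searchRequest, new SearchRequestActionListener(responseObserver));
        }
    }
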
+ * Similar to {@link RestSearchAction#parseSearchSource(SearchSourceBuilder, RestRequest, IntConsumer)} + * + * @param searchSourceBuilder the SearchSourceBuilder to populate + * @param request the Protocol Buffer SearchRequest to parse + * @param setSize consumer for setting the size parameter + */ + protected static void parseSearchSource( + final SearchSourceBuilder searchSourceBuilder, + org.opensearch.protobufs.SearchRequest request, + IntConsumer setSize + ) { + QueryBuilder queryBuilder = ProtoActionsProtoUtils.urlParamsToQueryBuilder(request); + if (queryBuilder != null) { + searchSourceBuilder.query(queryBuilder); + } + if (request.hasFrom()) { + searchSourceBuilder.from(request.getFrom()); + } + if (request.hasSize()) { + setSize.accept(request.getSize()); + } + + if (request.hasExplain()) { + searchSourceBuilder.explain(request.getExplain()); + } + + if (request.hasVersion()) { + searchSourceBuilder.version(request.getVersion()); + } + + if (request.hasSeqNoPrimaryTerm()) { + searchSourceBuilder.seqNoAndPrimaryTerm(request.getSeqNoPrimaryTerm()); + } + + if (request.hasTimeout()) { + searchSourceBuilder.timeout(parseTimeValue(request.getTimeout(), null, "timeout")); + } + + if (request.hasVerbosePipeline()) { + searchSourceBuilder.verbosePipeline(request.getVerbosePipeline()); + } + + if (request.hasTerminateAfter()) { + int terminateAfter = request.getTerminateAfter(); + if (terminateAfter < 0) { + throw new IllegalArgumentException("terminateAfter must be > 0"); + } else if (terminateAfter > 0) { + searchSourceBuilder.terminateAfter(terminateAfter); + } + } + StoredFieldsContext storedFieldsContext = StoredFieldsContextProtoUtils.fromProtoRequest(request); + if (storedFieldsContext != null) { + searchSourceBuilder.storedFields(storedFieldsContext); + } + if (request.getDocvalueFieldsCount() > 0) { + for (String field : request.getDocvalueFieldsList()) { + searchSourceBuilder.docValueField(field, null); + } + } + FetchSourceContext fetchSourceContext = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + if (fetchSourceContext != null) { + searchSourceBuilder.fetchSource(fetchSourceContext); + } + + if (request.hasTrackScores()) { + searchSourceBuilder.trackScores(request.getTrackScores()); + } + + if (request.hasIncludeNamedQueriesScore()) { + searchSourceBuilder.includeNamedQueriesScores(request.getIncludeNamedQueriesScore()); + } + + if (request.hasTrackTotalHits()) { + if (request.getTrackTotalHits().getTrackHitsCase() == TrackHits.TrackHitsCase.BOOL_VALUE) { + searchSourceBuilder.trackTotalHits(request.getTrackTotalHits().getBoolValue()); + } else if (request.getTrackTotalHits().getTrackHitsCase() == TrackHits.TrackHitsCase.INT32_VALUE) { + searchSourceBuilder.trackTotalHitsUpTo(request.getTrackTotalHits().getInt32Value()); + } + } + + if (request.getSortCount() > 0) { + for (SearchRequest.SortOrder sort : request.getSortList()) { + String sortField = sort.getField(); + + if (sort.hasDirection()) { + SearchRequest.SortOrder.Direction direction = sort.getDirection(); + switch (direction) { + case DIRECTION_ASC: + searchSourceBuilder.sort(sortField, SortOrder.ASC); + break; + case DIRECTION_DESC: + searchSourceBuilder.sort(sortField, SortOrder.DESC); + break; + default: + throw new IllegalArgumentException("Unsupported sort direction " + direction.toString()); + } + } else { + searchSourceBuilder.sort(sortField); + } + } + } + + if (request.getStatsCount() > 0) { + searchSourceBuilder.stats(request.getStatsList()); + } + + if (request.hasSuggestField()) { + String 
suggestField = request.getSuggestField(); + String suggestText = request.hasSuggestText() ? request.getSuggestText() : request.getQ(); + int suggestSize = request.hasSuggestSize() ? request.getSuggestSize() : 5; + SearchRequest.SuggestMode suggestMode = request.getSuggestMode(); + searchSourceBuilder.suggest( + new SuggestBuilder().addSuggestion( + suggestField, + termSuggestion(suggestField).text(suggestText) + .size(suggestSize) + .suggestMode(TermSuggestionBuilderProtoUtils.resolve(suggestMode)) + ) + ); + } + } + + /** + * Prepares a point in time search request. + * Similar to {@link RestSearchAction#preparePointInTime(org.opensearch.action.search.SearchRequest, RestRequest, NamedWriteableRegistry)} + * + * @param request the SearchRequest to prepare + * @param protoRequest the Protocol Buffer SearchRequest + * @param namedWriteableRegistry the registry for named writeables + */ + private static void preparePointInTime( + org.opensearch.action.search.SearchRequest request, + org.opensearch.protobufs.SearchRequest protoRequest, + NamedWriteableRegistry namedWriteableRegistry + ) { + assert request.pointInTimeBuilder() != null; + ActionRequestValidationException validationException = null; + if (request.indices().length > 0) { + validationException = addValidationError("[indices] cannot be used with point in time", validationException); + } + if (request.indicesOptions() != org.opensearch.action.search.SearchRequest.DEFAULT_INDICES_OPTIONS) { + validationException = addValidationError("[indicesOptions] cannot be used with point in time", validationException); + } + if (request.routing() != null) { + validationException = addValidationError("[routing] cannot be used with point in time", validationException); + } + if (request.preference() != null) { + validationException = addValidationError("[preference] cannot be used with point in time", validationException); + } + if (protoRequest.hasCcsMinimizeRoundtrips() && protoRequest.getCcsMinimizeRoundtrips()) { + validationException = addValidationError("[ccs_minimize_roundtrips] cannot be used with point in time", validationException); + request.setCcsMinimizeRoundtrips(false); + } + ExceptionsHelper.reThrowIfNotNull(validationException); + + final IndicesOptions indicesOptions = request.indicesOptions(); + final IndicesOptions stricterIndicesOptions = IndicesOptions.fromOptions( + indicesOptions.ignoreUnavailable(), + indicesOptions.allowNoIndices(), + false, + false, + false, + true, + true, + indicesOptions.ignoreThrottled() + ); + request.indicesOptions(stricterIndicesOptions); + final SearchContextId searchContextId = SearchContextId.decode(namedWriteableRegistry, request.pointInTimeBuilder().getId()); + request.indices(searchContextId.getActualIndices()); + } + + /** + * Checks and configures total hits tracking in the search request. + * Keep implementation consistent with {@link RestSearchAction#checkRestTotalHits(RestRequest, org.opensearch.action.search.SearchRequest)} + * + * @param protoRequest the Protocol Buffer SearchRequest + * @param searchRequest the SearchRequest to configure + */ + protected static void checkProtoTotalHits(SearchRequest protoRequest, org.opensearch.action.search.SearchRequest searchRequest) { + + boolean totalHitsAsInt = protoRequest.hasRestTotalHitsAsInt() ? 
protoRequest.getRestTotalHitsAsInt() : false; + if (totalHitsAsInt == false) { + return; + } + if (searchRequest.source() == null) { + searchRequest.source(new SearchSourceBuilder()); + } + Integer trackTotalHitsUpTo = searchRequest.source().trackTotalHitsUpTo(); + if (trackTotalHitsUpTo == null) { + searchRequest.source().trackTotalHits(true); + } else if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_ACCURATE + && trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { + throw new IllegalArgumentException( + "[" + + "rest_total_hits_as_int" + + "] cannot be used " + + "if the tracking of total hits is not accurate, got " + + trackTotalHitsUpTo + ); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchSourceBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchSourceBuilderProtoUtils.java new file mode 100644 index 0000000000000..d2c010dcb15fc --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchSourceBuilderProtoUtils.java @@ -0,0 +1,237 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.plugin.transport.grpc.proto.request.common.FetchSourceContextProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.request.common.ScriptProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.request.search.query.AbstractQueryBuilderProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.request.search.sort.SortBuilderProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.request.search.suggest.SuggestBuilderProtoUtils; +import org.opensearch.protobufs.DerivedField; +import org.opensearch.protobufs.FieldAndFormat; +import org.opensearch.protobufs.NumberMap; +import org.opensearch.protobufs.Rescore; +import org.opensearch.protobufs.ScriptField; +import org.opensearch.protobufs.SearchRequestBody; +import org.opensearch.protobufs.TrackHits; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.sort.SortBuilder; + +import java.io.IOException; +import java.util.Map; + +import static org.opensearch.search.builder.SearchSourceBuilder.TIMEOUT_FIELD; +import static org.opensearch.search.internal.SearchContext.TRACK_TOTAL_HITS_ACCURATE; +import static org.opensearch.search.internal.SearchContext.TRACK_TOTAL_HITS_DISABLED; + +/** + * Utility class for converting SearchSourceBuilder Protocol Buffers to objects + * + */ +public class SearchSourceBuilderProtoUtils { + + private SearchSourceBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Parses a protobuf SearchRequestBody into a SearchSourceBuilder. 
+ * This method is equivalent to {@link SearchSourceBuilder#parseXContent(XContentParser, boolean)} + * + * @param searchSourceBuilder The SearchSourceBuilder to populate + * @param protoRequest The Protocol Buffer SearchRequest to parse + * @throws IOException if there's an error during parsing + */ + protected static void parseProto(SearchSourceBuilder searchSourceBuilder, SearchRequestBody protoRequest) throws IOException { + // TODO what to do about parser.getDeprecationHandler() for protos? + + if (protoRequest.hasFrom()) { + searchSourceBuilder.from(protoRequest.getFrom()); + } + if (protoRequest.hasSize()) { + searchSourceBuilder.size(protoRequest.getSize()); + } + if (protoRequest.hasTimeout()) { + searchSourceBuilder.timeout(TimeValue.parseTimeValue(protoRequest.getTimeout(), null, TIMEOUT_FIELD.getPreferredName())); + } + if (protoRequest.hasTerminateAfter()) { + searchSourceBuilder.terminateAfter(protoRequest.getTerminateAfter()); + } + if (protoRequest.hasMinScore()) { + searchSourceBuilder.minScore(protoRequest.getMinScore()); + } + if (protoRequest.hasVersion()) { + searchSourceBuilder.version(protoRequest.getVersion()); + } + if (protoRequest.hasSeqNoPrimaryTerm()) { + searchSourceBuilder.seqNoAndPrimaryTerm(protoRequest.getSeqNoPrimaryTerm()); + } + if (protoRequest.hasExplain()) { + searchSourceBuilder.explain(protoRequest.getExplain()); + } + if (protoRequest.hasTrackScores()) { + searchSourceBuilder.trackScores(protoRequest.getTrackScores()); + } + if (protoRequest.hasIncludeNamedQueriesScore()) { + searchSourceBuilder.includeNamedQueriesScores(protoRequest.getIncludeNamedQueriesScore()); + } + if (protoRequest.hasTrackTotalHits()) { + if (protoRequest.getTrackTotalHits().getTrackHitsCase() == TrackHits.TrackHitsCase.BOOL_VALUE) { + searchSourceBuilder.trackTotalHitsUpTo( + protoRequest.getTrackTotalHits().getBoolValue() ? 
TRACK_TOTAL_HITS_ACCURATE : TRACK_TOTAL_HITS_DISABLED + ); + } else { + searchSourceBuilder.trackTotalHitsUpTo(protoRequest.getTrackTotalHits().getInt32Value()); + } + } + if (protoRequest.hasSource()) { + searchSourceBuilder.fetchSource(FetchSourceContextProtoUtils.fromProto(protoRequest.getSource())); + } + if (protoRequest.getStoredFieldsCount() > 0) { + searchSourceBuilder.storedFields(StoredFieldsContextProtoUtils.fromProto(protoRequest.getStoredFieldsList())); + } + if (protoRequest.getSortCount() > 0) { + for (SortBuilder sortBuilder : SortBuilderProtoUtils.fromProto(protoRequest.getSortList())) { + searchSourceBuilder.sort(sortBuilder); + } + } + if (protoRequest.hasProfile()) { + searchSourceBuilder.profile(protoRequest.getProfile()); + } + if (protoRequest.hasSearchPipeline()) { + searchSourceBuilder.pipeline(protoRequest.getSearchPipeline()); + } + if (protoRequest.hasVerbosePipeline()) { + searchSourceBuilder.verbosePipeline(protoRequest.getVerbosePipeline()); + } + if (protoRequest.hasQuery()) { + searchSourceBuilder.query(AbstractQueryBuilderProtoUtils.parseInnerQueryBuilderProto(protoRequest.getQuery())); + } + if (protoRequest.hasPostFilter()) { + searchSourceBuilder.postFilter(AbstractQueryBuilderProtoUtils.parseInnerQueryBuilderProto(protoRequest.getPostFilter())); + } + if (protoRequest.hasSource()) { + searchSourceBuilder.fetchSource(FetchSourceContextProtoUtils.fromProto(protoRequest.getSource())); + } + if (protoRequest.getScriptFieldsCount() > 0) { + for (Map.Entry entry : protoRequest.getScriptFieldsMap().entrySet()) { + String name = entry.getKey(); + ScriptField scriptFieldProto = entry.getValue(); + SearchSourceBuilder.ScriptField scriptField = ScriptFieldProtoUtils.fromProto(name, scriptFieldProto); + searchSourceBuilder.scriptField(name, scriptField.script(), scriptField.ignoreFailure()); + } + } + if (protoRequest.getIndicesBoostCount() > 0) { + /** + * Similar to {@link SearchSourceBuilder.IndexBoost#IndexBoost(XContentParser)} + */ + for (NumberMap numberMap : protoRequest.getIndicesBoostList()) { + for (Map.Entry entry : numberMap.getNumberMapMap().entrySet()) { + searchSourceBuilder.indexBoost(entry.getKey(), entry.getValue()); + } + } + } + + // TODO support aggregations + /* + if(protoRequest.hasAggs()){} + */ + + if (protoRequest.hasHighlight()) { + searchSourceBuilder.highlighter(HighlightBuilderProtoUtils.fromProto(protoRequest.getHighlight())); + } + if (protoRequest.hasSuggest()) { + searchSourceBuilder.suggest(SuggestBuilderProtoUtils.fromProto(protoRequest.getSuggest())); + } + if (protoRequest.getRescoreCount() > 0) { + for (Rescore rescore : protoRequest.getRescoreList()) { + searchSourceBuilder.addRescorer(RescorerBuilderProtoUtils.parseFromProto(rescore)); + } + } + + if (protoRequest.hasExt()) { + // TODO support ext + throw new UnsupportedOperationException("ext param is not supported yet"); + } + if (protoRequest.hasSlice()) { + searchSourceBuilder.slice(SliceBuilderProtoUtils.fromProto(protoRequest.getSlice())); + } + if (protoRequest.hasCollapse()) { + searchSourceBuilder.collapse(CollapseBuilderProtoUtils.fromProto(protoRequest.getCollapse())); + } + if (protoRequest.hasPit()) { + searchSourceBuilder.pointInTimeBuilder(PointInTimeBuilderProtoUtils.fromProto(protoRequest.getPit())); + } + if (protoRequest.getDerivedCount() > 0) { + for (Map.Entry entry : protoRequest.getDerivedMap().entrySet()) { + String name = entry.getKey(); + DerivedField derivedField = entry.getValue(); + searchSourceBuilder.derivedField( + name, + 
derivedField.getType(), + ScriptProtoUtils.parseFromProtoRequest(derivedField.getScript()) + ); + } + } + if (protoRequest.getDocvalueFieldsCount() > 0) { + for (FieldAndFormat fieldAndFormatProto : protoRequest.getDocvalueFieldsList()) { + /** + * Similar to {@link org.opensearch.search.fetch.subphase.FieldAndFormat#fromXContent(XContentParser)} + */ + searchSourceBuilder.docValueField(fieldAndFormatProto.getField(), fieldAndFormatProto.getFormat()); + } + + } + if (protoRequest.getFieldsCount() > 0) { + for (FieldAndFormat fieldAndFormatProto : protoRequest.getFieldsList()) { + /** + * Similar to {@link org.opensearch.search.fetch.subphase.FieldAndFormat#fromXContent(XContentParser)} + */ + searchSourceBuilder.fetchField(fieldAndFormatProto.getField(), fieldAndFormatProto.getFormat()); + } + } + if (protoRequest.getStatsCount() > 0) { + searchSourceBuilder.stats(protoRequest.getStatsList()); + } + if (protoRequest.getSearchAfterCount() > 0) { + searchSourceBuilder.searchAfter(SearchAfterBuilderProtoUtils.fromProto(protoRequest.getSearchAfterList())); + } + } + + /** + * Utility class for converting ScriptField Protocol Buffers to OpenSearch objects. + * This class handles the transformation of script field definitions between the two formats. + */ + public static class ScriptFieldProtoUtils { + /** + * Private constructor to prevent instantiation. + * This is a utility class with only static methods. + */ + private ScriptFieldProtoUtils() { + // Utility class, no instances + } + + /** + * Similar to {@link SearchSourceBuilder.ScriptField#ScriptField(XContentParser)} + * + * @param scriptFieldName + * @param scriptFieldProto + * @throws IOException if there's an error during parsing + */ + + public static SearchSourceBuilder.ScriptField fromProto(String scriptFieldName, ScriptField scriptFieldProto) throws IOException { + org.opensearch.script.Script script = ScriptProtoUtils.parseFromProtoRequest(scriptFieldProto.getScript()); + boolean ignoreFailure = scriptFieldProto.hasIgnoreFailure() ? scriptFieldProto.getIgnoreFailure() : false; + + return new SearchSourceBuilder.ScriptField(scriptFieldName, script, ignoreFailure); + } + + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchTypeProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchTypeProtoUtils.java new file mode 100644 index 0000000000000..073e146c6625f --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchTypeProtoUtils.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.action.search.SearchType; +import org.opensearch.protobufs.SearchRequest; + +/** + * Utility class for converting SearchType enums between OpenSearch and Protocol Buffers formats. + * This class provides methods to transform search type values to ensure proper execution + * of search operations with the correct search strategy. + */ +public class SearchTypeProtoUtils { + + private SearchTypeProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer SearchRequest to a SearchType. 
+ * + * Similar to {@link SearchType#fromString(String)} + * Please ensure to keep both implementations consistent. + * + * @param request the Protocol Buffer SearchRequest to convert + * @return the corresponding SearchType + */ + protected static SearchType fromProto(SearchRequest request) { + if (!request.hasSearchType()) { + return SearchType.DEFAULT; + } + SearchRequest.SearchType searchType = request.getSearchType(); + switch (searchType) { + case SEARCH_TYPE_DFS_QUERY_THEN_FETCH: + return SearchType.DFS_QUERY_THEN_FETCH; + case SEARCH_TYPE_QUERY_THEN_FETCH: + return SearchType.QUERY_THEN_FETCH; + default: + throw new IllegalArgumentException("No search type for [" + searchType + "]"); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SliceBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SliceBuilderProtoUtils.java new file mode 100644 index 0000000000000..5a08acfb7f988 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/SliceBuilderProtoUtils.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.protobufs.SlicedScroll; +import org.opensearch.search.slice.SliceBuilder; + +/** + * Utility class for converting SlicedScroll Protocol Buffers to SliceBuilder objects + * + */ +public class SliceBuilderProtoUtils { + + private SliceBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Similar to {@link SliceBuilder#fromXContent(XContentParser)} + * + * @param sliceProto the Protocol Buffer SlicedScroll to convert + */ + + protected static SliceBuilder fromProto(SlicedScroll sliceProto) { + SliceBuilder sliceBuilder; + if (sliceProto.hasField()) { + sliceBuilder = new SliceBuilder(sliceProto.getField(), sliceProto.getId(), sliceProto.getMax()); + } else { + sliceBuilder = new SliceBuilder(sliceProto.getId(), sliceProto.getMax()); + } + + return sliceBuilder; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/StoredFieldsContextProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/StoredFieldsContextProtoUtils.java new file mode 100644 index 0000000000000..b55e149d51f0a --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/StoredFieldsContextProtoUtils.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.rest.RestRequest; +import org.opensearch.search.fetch.StoredFieldsContext; + +import java.io.IOException; +import java.util.List; + +/** + * Utility class for converting StoredFieldsContext between OpenSearch and Protocol Buffers formats. + * This class provides methods to create StoredFieldsContext objects from Protocol Buffer requests + * to ensure proper handling of stored fields in search operations.
+ */ +public class StoredFieldsContextProtoUtils { + + private StoredFieldsContextProtoUtils() { + // Utility class, no instances + } + + /** + * Create a StoredFieldsContext from a Protocol Buffer list of field names + * + * @param storedFields the list of field names + * @return a StoredFieldsContext + * @throws IOException if an I/O exception occurred + */ + protected static StoredFieldsContext fromProto(List<String> storedFields) throws IOException { + if (storedFields == null || storedFields.isEmpty()) { + return null; + } + return StoredFieldsContext.fromList(storedFields); + } + + /** + * Create a StoredFieldsContext from a Protocol Buffer SearchRequest + * Similar to {@link StoredFieldsContext#fromRestRequest(String, RestRequest)} + * + * @param request the Protocol Buffer SearchRequest + * @return a StoredFieldsContext + */ + protected static StoredFieldsContext fromProtoRequest(org.opensearch.protobufs.SearchRequest request) { + if (request.getStoredFieldsCount() > 0) { + return StoredFieldsContext.fromList(request.getStoredFieldsList()); + } + return null; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/package-info.java new file mode 100644 index 0000000000000..960673a02a29d --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains utility classes for converting search requests between OpenSearch + * and Protocol Buffers formats. These utilities handle the transformation of search request + * parameters, options, and configurations to ensure proper communication between gRPC clients + * and the OpenSearch server. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java new file mode 100644 index 0000000000000..5e4d4ac778bdb --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.query.AbstractQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.protobufs.QueryContainer; + +import java.io.IOException; + +/** + * Utility class for converting AbstractQueryBuilder Protocol Buffers to objects.
+ */ +public class AbstractQueryBuilderProtoUtils { + + private AbstractQueryBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Parse a query from its Protocol Buffer representation + * Similar to {@link AbstractQueryBuilder#parseInnerQueryBuilder(XContentParser)} + * + * @param queryContainer The Protocol Buffer query container + * @return A QueryBuilder instance + * @throws IOException if there's an error during parsing + */ + public static QueryBuilder parseInnerQueryBuilderProto(QueryContainer queryContainer) throws IOException { + QueryBuilder result; + + if (queryContainer.hasMatchAll()) { + result = MatchAllQueryBuilderProtoUtils.fromProto(queryContainer.getMatchAll()); + } else if (queryContainer.hasMatchNone()) { + result = MatchNoneQueryBuilderProtoUtils.fromProto(queryContainer.getMatchNone()); + } else if (queryContainer.getTermCount() > 0) { + result = TermQueryBuilderProtoUtils.fromProto(queryContainer.getTermMap()); + } + // TODO add more query types + else { + throw new UnsupportedOperationException("Search query type not supported yet."); + } + + return result; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchAllQueryBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchAllQueryBuilderProtoUtils.java new file mode 100644 index 0000000000000..82babebf1cf66 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchAllQueryBuilderProtoUtils.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.protobufs.MatchAllQuery; + +/** + * Utility class for converting MatchAllQuery Protocol Buffers to OpenSearch query objects. + */ +public class MatchAllQueryBuilderProtoUtils { + + private MatchAllQueryBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer MatchAllQuery to an OpenSearch MatchAllQueryBuilder. + * Similar to {@link MatchAllQueryBuilder#fromXContent(XContentParser)}, this method + * parses the Protocol Buffer representation and creates a properly configured + * MatchAllQueryBuilder with the appropriate boost and name settings. 
+ * + * @param matchAllQueryProto The Protocol Buffer MatchAllQuery to convert + * @return A configured MatchAllQueryBuilder instance + */ + protected static MatchAllQueryBuilder fromProto(MatchAllQuery matchAllQueryProto) { + MatchAllQueryBuilder matchAllQueryBuilder = new MatchAllQueryBuilder(); + + if (matchAllQueryProto.hasBoost()) { + matchAllQueryBuilder.boost(matchAllQueryProto.getBoost()); + } + + if (matchAllQueryProto.hasName()) { + matchAllQueryBuilder.queryName(matchAllQueryProto.getName()); + } + + return matchAllQueryBuilder; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchNoneQueryBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchNoneQueryBuilderProtoUtils.java new file mode 100644 index 0000000000000..476b63daca906 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchNoneQueryBuilderProtoUtils.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.query.MatchNoneQueryBuilder; +import org.opensearch.protobufs.MatchNoneQuery; + +/** + * Utility class for converting MatchNoneQuery Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of match_none queries + * into their corresponding OpenSearch MatchNoneQueryBuilder implementations for search operations. + */ +public class MatchNoneQueryBuilderProtoUtils { + + private MatchNoneQueryBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer MatchNoneQuery to an OpenSearch MatchNoneQueryBuilder. + * Similar to {@link MatchNoneQueryBuilder#fromXContent(XContentParser)}, this method + * parses the Protocol Buffer representation and creates a properly configured + * MatchNoneQueryBuilder with the appropriate boost and name settings. 
+ * + * @param matchNoneQueryProto The Protocol Buffer MatchNoneQuery to convert + * @return A configured MatchNoneQueryBuilder instance + */ + protected static MatchNoneQueryBuilder fromProto(MatchNoneQuery matchNoneQueryProto) { + MatchNoneQueryBuilder matchNoneQueryBuilder = new MatchNoneQueryBuilder(); + + if (matchNoneQueryProto.hasBoost()) { + matchNoneQueryBuilder.boost(matchNoneQueryProto.getBoost()); + } + + if (matchNoneQueryProto.hasName()) { + matchNoneQueryBuilder.queryName(matchNoneQueryProto.getName()); + } + + return matchNoneQueryBuilder; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermQueryBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermQueryBuilderProtoUtils.java new file mode 100644 index 0000000000000..328ceea4a65c1 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermQueryBuilderProtoUtils.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.query.AbstractQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.plugin.transport.grpc.proto.request.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.FieldValue; +import org.opensearch.protobufs.TermQuery; + +import java.util.Map; + +/** + * Utility class for converting TermQuery Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of term queries + * into their corresponding OpenSearch TermQueryBuilder implementations for search operations. + */ +public class TermQueryBuilderProtoUtils { + + private TermQueryBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer TermQuery map to an OpenSearch TermQueryBuilder. + * Similar to {@link TermQueryBuilder#fromXContent(XContentParser)}, this method + * parses the Protocol Buffer representation and creates a properly configured + * TermQueryBuilder with the appropriate field name, value, boost, query name, + * and case sensitivity settings. 
+ * + * @param termQueryProto The map of field names to Protocol Buffer TermQuery objects + * @return A configured TermQueryBuilder instance + * @throws IllegalArgumentException if the term query map has more than one element, + * if the field value type is not supported, or if the term query field value is not recognized + */ + protected static TermQueryBuilder fromProto(Map<String, TermQuery> termQueryProto) { + String queryName = null; + String fieldName = null; + Object value = null; + float boost = AbstractQueryBuilder.DEFAULT_BOOST; + boolean caseInsensitive = TermQueryBuilder.DEFAULT_CASE_INSENSITIVITY; + + if (termQueryProto.size() > 1) { + throw new IllegalArgumentException("Term query can only have 1 element in the map"); + } + + for (Map.Entry<String, TermQuery> entry : termQueryProto.entrySet()) { + + fieldName = entry.getKey(); + + TermQuery termQuery = entry.getValue(); + + if (termQuery.hasName()) { + queryName = termQuery.getName(); + } + if (termQuery.hasBoost()) { + boost = termQuery.getBoost(); + } + + FieldValue fieldValue = termQuery.getValue(); + + switch (fieldValue.getTypeCase()) { + case GENERAL_NUMBER: + switch (fieldValue.getGeneralNumber().getValueCase()) { + case INT32_VALUE: + value = fieldValue.getGeneralNumber().getInt32Value(); + break; + case INT64_VALUE: + value = fieldValue.getGeneralNumber().getInt64Value(); + break; + case FLOAT_VALUE: + value = fieldValue.getGeneralNumber().getFloatValue(); + break; + case DOUBLE_VALUE: + value = fieldValue.getGeneralNumber().getDoubleValue(); + break; + default: + throw new IllegalArgumentException( + "Unsupported general number type: " + fieldValue.getGeneralNumber().getValueCase() + ); + } + break; + case STRING_VALUE: + value = fieldValue.getStringValue(); + break; + case OBJECT_MAP: + value = ObjectMapProtoUtils.fromProto(fieldValue.getObjectMap()); + break; + case BOOL_VALUE: + value = fieldValue.getBoolValue(); + break; + default: + throw new IllegalArgumentException("TermQuery field value not recognized"); + } + + if (termQuery.hasCaseInsensitive()) { + caseInsensitive = termQuery.getCaseInsensitive(); + } + + } + TermQueryBuilder termQuery = new TermQueryBuilder(fieldName, value); + termQuery.boost(boost); + if (queryName != null) { + termQuery.queryName(queryName); + } + termQuery.caseInsensitive(caseInsensitive); + + return termQuery; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/package-info.java new file mode 100644 index 0000000000000..40819cd1d6e37 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains utility classes for converting search query components between OpenSearch + * and Protocol Buffers formats. These utilities handle the transformation of query builders, + * query parameters, and query configurations to ensure proper communication between gRPC clients + * and the OpenSearch server.
+ */ +package org.opensearch.plugin.transport.grpc.proto.request.search.query; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtils.java new file mode 100644 index 0000000000000..384bf5352c027 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtils.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search.sort; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.protobufs.FieldWithOrderMap; +import org.opensearch.protobufs.ScoreSort; +import org.opensearch.search.sort.FieldSortBuilder; +import org.opensearch.search.sort.SortBuilder; +import org.opensearch.search.sort.SortOrder; + +import java.util.List; +import java.util.Map; + +import static org.opensearch.plugin.transport.grpc.proto.request.search.sort.SortBuilderProtoUtils.fieldOrScoreSort; + +/** + * Utility class for converting FieldSortBuilder components between OpenSearch and Protocol Buffers formats. + * This class provides methods to transform field sort definitions and parameters to ensure proper + * sorting behavior in search operations. + */ +public class FieldSortBuilderProtoUtils { + private FieldSortBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer field sort representation to OpenSearch SortBuilder objects. + * Similar to {@link FieldSortBuilder#fromXContent(XContentParser, String)}, this method + * parses field sort definitions from Protocol Buffers and adds them to the provided list. + * + * @param sortBuilder The list of SortBuilder objects to add the parsed field sorts to + * @param fieldWithOrderMap The Protocol Buffer map containing field names and their sort orders + */ + public static void fromProto(List<SortBuilder<?>> sortBuilder, FieldWithOrderMap fieldWithOrderMap) { + for (Map.Entry<String, ScoreSort> entry : fieldWithOrderMap.getFieldWithOrderMapMap().entrySet()) { + + String fieldName = entry.getKey(); + ScoreSort scoreSort = entry.getValue(); + + SortOrder order = SortOrderProtoUtils.fromProto(scoreSort.getOrder()); + + sortBuilder.add(fieldOrScoreSort(fieldName).order(order)); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortBuilderProtoUtils.java new file mode 100644 index 0000000000000..b2ee89b9b43f2 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortBuilderProtoUtils.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ +package org.opensearch.plugin.transport.grpc.proto.request.search.sort; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.protobufs.FieldWithOrderMap; +import org.opensearch.protobufs.SortCombinations; +import org.opensearch.search.sort.FieldSortBuilder; +import org.opensearch.search.sort.ScoreSortBuilder; +import org.opensearch.search.sort.SortBuilder; + +import java.util.ArrayList; +import java.util.List; + +/** + * Utility class for converting SortBuilder Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of sort + * specifications into their corresponding OpenSearch SortBuilder implementations for + * search result sorting. + */ +public class SortBuilderProtoUtils { + + private SortBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a list of Protocol Buffer SortCombinations to a list of OpenSearch SortBuilder objects. + * Similar to {@link SortBuilder#fromXContent(XContentParser)}, this method + * parses the Protocol Buffer representation and creates properly configured + * SortBuilder instances with the appropriate settings. + * + * @param sortProto The list of Protocol Buffer SortCombinations to convert + * @return A list of configured SortBuilder instances + * @throws IllegalArgumentException if invalid sort combinations are provided + * @throws UnsupportedOperationException if sort options are not yet supported + */ + public static List<SortBuilder<?>> fromProto(List<SortCombinations> sortProto) { + List<SortBuilder<?>> sortFields = new ArrayList<>(2); + + for (SortCombinations sortCombinations : sortProto) { + switch (sortCombinations.getSortCombinationsCase()) { + case STRING_VALUE: + String name = sortCombinations.getStringValue(); + sortFields.add(fieldOrScoreSort(name)); + break; + case FIELD_WITH_ORDER_MAP: + FieldWithOrderMap fieldWithOrderMap = sortCombinations.getFieldWithOrderMap(); + FieldSortBuilderProtoUtils.fromProto(sortFields, fieldWithOrderMap); + break; + case SORT_OPTIONS: + throw new UnsupportedOperationException("sort options not supported yet"); + /* + SortOptions sortOptions = sortCombinations.getSortOptions(); + String fieldName; + SortOrder order; + switch(sortOptions.getSortOptionsCase()) { + case SCORE: + fieldName = ScoreSortBuilder.NAME; + order = SortOrderProtoUtils.fromProto(sortOptions.getScore().getOrder()); + // TODO add other fields from ScoreSortBuilder + break; + case DOC: + fieldName = FieldSortBuilder.DOC_FIELD_NAME; + order = SortOrderProtoUtils.fromProto(sortOptions.getDoc().getOrder()); + // TODO add other fields from FieldSortBuilder + break; + case GEO_DISTANCE: + fieldName = GeoDistanceAggregationBuilder.NAME; + order = SortOrderProtoUtils.fromProto(sortOptions.getGeoDistance().getOrder()); + // TODO add other fields from GeoDistanceBuilder + break; + case SCRIPT: + fieldName = ScriptSortBuilder.NAME; + order = SortOrderProtoUtils.fromProto(sortOptions.getScript().getOrder()); + // TODO add other fields from ScriptSortBuilder + break; + default: + throw new IllegalArgumentException("Invalid sort options provided: "+ sortCombinations.getSortOptions().getSortOptionsCase()); + } + // TODO add other fields from ScoreSortBuilder, FieldSortBuilder, GeoDistanceBuilder, ScriptSortBuilder too + sortFields.add(fieldOrScoreSort(fieldName).order(order)); + break; + */ + default: + throw new IllegalArgumentException("Invalid sort combinations provided: " + sortCombinations.getSortCombinationsCase()); + } + } + return sortFields; + } + + /** + * Creates either a
ScoreSortBuilder or FieldSortBuilder based on the field name. + * Similar to {@link SortBuilder#fieldOrScoreSort(String)}, this method returns + * a ScoreSortBuilder if the field name is "score", otherwise it returns a + * FieldSortBuilder with the specified field name. + * + * @param fieldName The name of the field to sort by, or "score" for score-based sorting + * @return A SortBuilder instance (either ScoreSortBuilder or FieldSortBuilder) + */ + public static SortBuilder fieldOrScoreSort(String fieldName) { + if (fieldName.equals("score")) { + return new ScoreSortBuilder(); + } else { + return new FieldSortBuilder(fieldName); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtils.java new file mode 100644 index 0000000000000..6dc40dd5b700b --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtils.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search.sort; + +import org.opensearch.protobufs.GeoDistanceSort; +import org.opensearch.protobufs.ScoreSort; +import org.opensearch.protobufs.ScriptSort; +import org.opensearch.search.sort.SortOrder; + +/** + * Utility class for converting SortOrder Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of sort orders + * from various sort types into their corresponding OpenSearch SortOrder enums for search operations. + */ +public class SortOrderProtoUtils { + + private SortOrderProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer ScoreSort.SortOrder to an OpenSearch SortOrder. + * Similar to {@link SortOrder#fromString(String)}, this method maps the Protocol Buffer + * sort order enum values to their corresponding OpenSearch SortOrder values. + * + * @param sortOrder The Protocol Buffer ScoreSort.SortOrder to convert + * @return The corresponding OpenSearch SortOrder + * @throws IllegalArgumentException if the sort order is unspecified or invalid + */ + public static SortOrder fromProto(ScoreSort.SortOrder sortOrder) { + switch (sortOrder) { + case SORT_ORDER_ASC: + return SortOrder.ASC; + case SORT_ORDER_DESC: + return SortOrder.DESC; + case SORT_ORDER_UNSPECIFIED: + default: + throw new IllegalArgumentException("Must provide oneof sort combinations"); + } + } + + /** + * Converts a Protocol Buffer GeoDistanceSort.SortOrder to an OpenSearch SortOrder. + * Similar to {@link SortOrder#fromString(String)}, this method maps the Protocol Buffer + * sort order enum values to their corresponding OpenSearch SortOrder values. 
+ * + * @param sortOrder The Protocol Buffer GeoDistanceSort.SortOrder to convert + * @return The corresponding OpenSearch SortOrder + * @throws IllegalArgumentException if the sort order is unspecified or invalid + */ + public static SortOrder fromProto(GeoDistanceSort.SortOrder sortOrder) { + switch (sortOrder) { + case SORT_ORDER_ASC: + return SortOrder.ASC; + case SORT_ORDER_DESC: + return SortOrder.DESC; + case SORT_ORDER_UNSPECIFIED: + default: + throw new IllegalArgumentException("Must provide oneof sort combinations"); + } + } + + /** + * Converts a Protocol Buffer ScriptSort.SortOrder to an OpenSearch SortOrder. + * Similar to {@link SortOrder#fromString(String)}, this method maps the Protocol Buffer + * sort order enum values to their corresponding OpenSearch SortOrder values. + * + * @param sortOrder The Protocol Buffer ScriptSort.SortOrder to convert + * @return The corresponding OpenSearch SortOrder + * @throws IllegalArgumentException if the sort order is unspecified or invalid + */ + public static SortOrder fromProto(ScriptSort.SortOrder sortOrder) { + switch (sortOrder) { + case SORT_ORDER_ASC: + return SortOrder.ASC; + case SORT_ORDER_DESC: + return SortOrder.DESC; + case SORT_ORDER_UNSPECIFIED: + default: + throw new IllegalArgumentException("Must provide oneof sort combinations"); + } + } + +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/package-info.java new file mode 100644 index 0000000000000..dd6eb970ed52e --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains utility classes for converting search sort components between OpenSearch + * and Protocol Buffers formats. These utilities handle the transformation of sort builders, + * sort parameters, and sort configurations to ensure proper communication between gRPC clients + * and the OpenSearch server. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search.sort; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/SuggestBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/SuggestBuilderProtoUtils.java new file mode 100644 index 0000000000000..5523c24949639 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/SuggestBuilderProtoUtils.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.plugin.transport.grpc.proto.request.search.suggest; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.protobufs.Suggester; +import org.opensearch.search.suggest.SuggestBuilder; + +/** + * Utility class for converting Suggester Protocol Buffers to SuggestBuilder objects + * + */ +public class SuggestBuilderProtoUtils { + + private SuggestBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Similar to {@link SuggestBuilder#fromXContent(XContentParser)} + * + * @param suggesterProto the Protocol Buffer Suggester to convert + */ + + public static SuggestBuilder fromProto(Suggester suggesterProto) { + SuggestBuilder suggestBuilder = new SuggestBuilder(); + + // TODO populate suggestBuilder + + return suggestBuilder; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/TermSuggestionBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/TermSuggestionBuilderProtoUtils.java new file mode 100644 index 0000000000000..df496c6c6ffc6 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/TermSuggestionBuilderProtoUtils.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search.suggest; + +import org.opensearch.protobufs.SearchRequest; +import org.opensearch.search.suggest.term.TermSuggestionBuilder; + +/** + * Utility class for converting TermSuggestionBuilder components between OpenSearch and Protocol Buffers formats. + * This class provides methods to transform suggestion modes and other term suggestion parameters + * to ensure proper handling of term suggestions in search operations. + */ +public class TermSuggestionBuilderProtoUtils { + private TermSuggestionBuilderProtoUtils() { + // Utility class, no instances + } + + /** + * Resolves a Protocol Buffer SuggestMode to a TermSuggestionBuilder.SuggestMode.
+ * Similar to {@link TermSuggestionBuilder.SuggestMode#resolve(String)} + * + * @param suggest_mode the Protocol Buffer SuggestMode to resolve + * @return the corresponding TermSuggestionBuilder.SuggestMode + * @throws IllegalArgumentException if the suggest_mode is invalid + */ + public static TermSuggestionBuilder.SuggestMode resolve(final SearchRequest.SuggestMode suggest_mode) { + switch (suggest_mode) { + case SUGGEST_MODE_ALWAYS: + return TermSuggestionBuilder.SuggestMode.ALWAYS; + case SUGGEST_MODE_MISSING: + return TermSuggestionBuilder.SuggestMode.MISSING; + case SUGGEST_MODE_POPULAR: + return TermSuggestionBuilder.SuggestMode.POPULAR; + default: + throw new IllegalArgumentException("Invalid suggest_mode " + suggest_mode.toString()); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/package-info.java new file mode 100644 index 0000000000000..b2493fbf27f18 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains utility classes for converting search suggestion components between OpenSearch + * and Protocol Buffers formats. These utilities handle the transformation of suggestion builders, + * suggestion parameters, and suggestion configurations to ensure proper communication between gRPC clients + * and the OpenSearch server. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search.suggest; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtils.java new file mode 100644 index 0000000000000..61524011af950 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtils.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.common; + +import org.opensearch.protobufs.FieldValue; +import org.opensearch.protobufs.GeneralNumber; +import org.opensearch.protobufs.ObjectMap; + +import java.util.Map; + +/** + * Utility class for converting generic Java objects to google.protobuf.Struct Protobuf type. + */ +public class FieldValueProtoUtils { + + private FieldValueProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a generic Java Object to its Protocol Buffer representation. 
+ * + * @param javaObject The java object to convert + * @return A Protobuf FieldValue representation + */ + public static FieldValue toProto(Object javaObject) { + FieldValue.Builder fieldValueBuilder = FieldValue.newBuilder(); + + if (javaObject instanceof Integer) { + // Integer + fieldValueBuilder.setGeneralNumber(GeneralNumber.newBuilder().setInt32Value((int) javaObject).build()); + } else if (javaObject instanceof Long) { + // Long + fieldValueBuilder.setGeneralNumber(GeneralNumber.newBuilder().setInt64Value((long) javaObject).build()); + } else if (javaObject instanceof Double) { + // Double + fieldValueBuilder.setGeneralNumber(GeneralNumber.newBuilder().setDoubleValue((double) javaObject).build()); + } else if (javaObject instanceof Float) { + // Float + fieldValueBuilder.setGeneralNumber(GeneralNumber.newBuilder().setFloatValue((float) javaObject).build()); + } else if (javaObject instanceof String) { + // String + fieldValueBuilder.setStringValue((String) javaObject); + } else if (javaObject instanceof Boolean) { + // Boolean + fieldValueBuilder.setBoolValue((Boolean) javaObject); + } else if (javaObject instanceof Enum) { + // Enum + fieldValueBuilder.setStringValue(javaObject.toString()); + } else if (javaObject instanceof Map) { + // Map + ObjectMap.Builder objectMapBuilder = ObjectMap.newBuilder(); + + @SuppressWarnings("unchecked") + Map<String, Object> fieldMap = (Map<String, Object>) javaObject; + for (Map.Entry<String, Object> entry : fieldMap.entrySet()) { + objectMapBuilder.putFields(entry.getKey(), ObjectMapProtoUtils.toProto(entry.getValue())); + } + fieldValueBuilder.setObjectMap(objectMapBuilder.build()); + } else { + throw new IllegalArgumentException("Cannot convert " + javaObject.toString() + " to google.protobuf.Struct"); + } + + return fieldValueBuilder.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtils.java index 16a611e5b3113..b9d6f420c1553 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/bulk/BulkItemResponseProtoUtils.java @@ -13,9 +13,9 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.get.GetResult; -import org.opensearch.plugin.transport.grpc.proto.response.common.OpenSearchExceptionProtoUtils; import org.opensearch.plugin.transport.grpc.proto.response.document.common.DocWriteResponseProtoUtils; import org.opensearch.plugin.transport.grpc.proto.response.document.get.GetResultProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.opensearchexception.OpenSearchExceptionProtoUtils; import org.opensearch.protobufs.ErrorCause; import org.opensearch.protobufs.Item; import org.opensearch.protobufs.NullValue; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtils.java index d8308216bc659..62499d5b235f2 100644 ---
a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/ShardInfoProtoUtils.java @@ -10,7 +10,7 @@ import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.transport.grpc.proto.response.common.OpenSearchExceptionProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.opensearchexception.OpenSearchExceptionProtoUtils; import org.opensearch.protobufs.ShardFailure; import org.opensearch.protobufs.ShardInfo; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/opensearchexception/OpenSearchExceptionProtoUtils.java similarity index 97% rename from plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtils.java rename to plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/opensearchexception/OpenSearchExceptionProtoUtils.java index 69720ae86c43f..f57a9049ddb6d 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/opensearchexception/OpenSearchExceptionProtoUtils.java @@ -5,7 +5,7 @@ * this file be licensed under the Apache-2.0 license or a * compatible open source license. */ -package org.opensearch.plugin.transport.grpc.proto.response.common; +package org.opensearch.plugin.transport.grpc.proto.response.exceptions.opensearchexception; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; @@ -43,9 +43,7 @@ import static org.opensearch.OpenSearchException.getExceptionName; /** - * Utility class for converting Exception objects to Protocol Buffers. - * This class handles the conversion of OpenSearchException and other Throwable instances - * to their Protocol Buffer representation. + * Utility class for converting OpenSearchException objects to Protocol Buffers. */ public class OpenSearchExceptionProtoUtils { diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/opensearchexception/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/opensearchexception/package-info.java new file mode 100644 index 0000000000000..3a2b7145603de --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/opensearchexception/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains utility classes for converting OpenSearch exceptions between OpenSearch + * and Protocol Buffers formats. 
These utilities handle the transformation of general exception details, + * error messages, and stack traces to ensure proper error reporting between the OpenSearch + * server and gRPC clients. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions.opensearchexception; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/DefaultShardOperationFailedExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/DefaultShardOperationFailedExceptionProtoUtils.java new file mode 100644 index 0000000000000..2b7361233e1b3 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/DefaultShardOperationFailedExceptionProtoUtils.java @@ -0,0 +1,118 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions.shardoperationfailedexception; + +import org.opensearch.action.admin.indices.close.CloseIndexResponse; +import org.opensearch.action.admin.indices.readonly.AddIndexBlockResponse; +import org.opensearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.opensearchexception.OpenSearchExceptionProtoUtils; +import org.opensearch.protobufs.ShardFailure; + +import java.io.IOException; + +/** + * Utility class for converting DefaultShardOperationFailedException objects to Protocol Buffers. + */ +public class DefaultShardOperationFailedExceptionProtoUtils { + + private DefaultShardOperationFailedExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a DefaultShardOperationFailedException to a Protocol Buffer Struct. + * Similar to {@link DefaultShardOperationFailedException#toXContent(XContentBuilder, ToXContent.Params)} * + * This method is overridden by various exception classes, which are hardcoded here. + * + * @param exception The DefaultShardOperationFailedException to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static ShardFailure toProto(DefaultShardOperationFailedException exception) throws IOException { + ShardFailure.Builder shardFailureBuilder = ShardFailure.newBuilder(); + + if (exception instanceof AddIndexBlockResponse.AddBlockShardResult.Failure) { + innerToProto(shardFailureBuilder, (AddIndexBlockResponse.AddBlockShardResult.Failure) exception); + } else if (exception instanceof IndicesShardStoresResponse.Failure) { + innerToProto(shardFailureBuilder, (IndicesShardStoresResponse.Failure) exception); + } else if (exception instanceof CloseIndexResponse.ShardResult.Failure) { + innerToProto(shardFailureBuilder, (CloseIndexResponse.ShardResult.Failure) exception); + } else { + parentInnerToProto(shardFailureBuilder, exception); + } + return shardFailureBuilder.build(); + } + + /** + * Converts the metadata from a AddIndexBlockResponse.AddBlockShardResult.Failure to a Protocol Buffer Struct. 
+ * Similar to {@link AddIndexBlockResponse.AddBlockShardResult.Failure#innerToXContent(XContentBuilder, ToXContent.Params)} + * + * @param shardFailureBuilder the builder to populate with failure information + * @param exception The AddIndexBlockResponse.AddBlockShardResult.Failure to convert + * @throws IOException if there's an error during conversion + */ + public static void innerToProto(ShardFailure.Builder shardFailureBuilder, AddIndexBlockResponse.AddBlockShardResult.Failure exception) + throws IOException { + if (exception.getNodeId() != null) { + shardFailureBuilder.setNode(exception.getNodeId()); + } + parentInnerToProto(shardFailureBuilder, exception); + } + + /** + * Converts the metadata from a IndicesShardStoresResponse.Failure to a Protocol Buffer Struct. + * Similar to {@link IndicesShardStoresResponse.Failure#innerToXContent(XContentBuilder, ToXContent.Params)} + * + * @param shardFailureBuilder the builder to populate with failure information + * @param exception The IndicesShardStoresResponse.Failure to convert + * @throws IOException if there's an error during conversion + */ + public static void innerToProto(ShardFailure.Builder shardFailureBuilder, IndicesShardStoresResponse.Failure exception) + throws IOException { + shardFailureBuilder.setNode(exception.nodeId()); + parentInnerToProto(shardFailureBuilder, exception); + } + + /** + * Converts the metadata from a CloseIndexResponse.ShardResult.Failure to a Protocol Buffer Struct. + * Similar to {@link CloseIndexResponse.ShardResult.Failure#innerToXContent(XContentBuilder, ToXContent.Params)} + * + * @param shardFailureBuilder the builder to populate with failure information + * @param exception The CloseIndexResponse.ShardResult.Failure to convert + * @throws IOException if there's an error during conversion + */ + public static void innerToProto(ShardFailure.Builder shardFailureBuilder, CloseIndexResponse.ShardResult.Failure exception) + throws IOException { + if (exception.getNodeId() != null) { + shardFailureBuilder.setNode(exception.getNodeId()); + } + parentInnerToProto(shardFailureBuilder, exception); + } + + /** + * Converts the metadata from a DefaultShardOperationFailedException to a Protocol Buffer Struct. 
+ * Similar to {@link DefaultShardOperationFailedException#innerToXContent(XContentBuilder, ToXContent.Params)} + * + * @param shardFailureBuilder the builder to populate with failure information + * @param exception The DefaultShardOperationFailedException to convert + * @throws IOException if there's an error during conversion + */ + public static void parentInnerToProto(ShardFailure.Builder shardFailureBuilder, DefaultShardOperationFailedException exception) + throws IOException { + shardFailureBuilder.setShard(exception.shardId()); + if (exception.index() != null) { + shardFailureBuilder.setIndex(exception.index()); + } + shardFailureBuilder.setStatus(exception.status().name()); + if (exception.reason() != null) { + shardFailureBuilder.setReason(OpenSearchExceptionProtoUtils.generateThrowableProto(exception.getCause())); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ReplicationResponseShardInfoFailureProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ReplicationResponseShardInfoFailureProtoUtils.java new file mode 100644 index 0000000000000..0853f9d2137e4 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ReplicationResponseShardInfoFailureProtoUtils.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions.shardoperationfailedexception; + +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.opensearchexception.OpenSearchExceptionProtoUtils; +import org.opensearch.protobufs.ShardFailure; + +import java.io.IOException; + +/** + * Utility class for converting Exception objects to Protocol Buffers. + */ +public class ReplicationResponseShardInfoFailureProtoUtils { + + private ReplicationResponseShardInfoFailureProtoUtils() { + // Utility class, no instances + } + + /** + * This method is similar to {@link ReplicationResponse.ShardInfo.Failure#toXContent(XContentBuilder, ToXContent.Params)} + * This method is overridden by various exception classes, which are hardcoded here. 
+ * + * @param exception The ReplicationResponse.ShardInfo.Failure to convert metadata from + * @return A map containing the exception's metadata as ObjectMap.Value objects + */ + public static ShardFailure toProto(ReplicationResponse.ShardInfo.Failure exception) throws IOException { + ShardFailure.Builder shardFailure = ShardFailure.newBuilder(); + if (exception.index() != null) { + shardFailure.setIndex(exception.index()); + } + shardFailure.setShard(exception.shardId()); + if (exception.nodeId() != null) { + shardFailure.setNode(exception.nodeId()); + } + shardFailure.setReason(OpenSearchExceptionProtoUtils.generateThrowableProto(exception.getCause())); + shardFailure.setStatus(exception.status().name()); + shardFailure.setPrimary(exception.primary()); + return shardFailure.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardOperationFailedExceptionProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardOperationFailedExceptionProtoUtils.java new file mode 100644 index 0000000000000..c5a26930d9300 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardOperationFailedExceptionProtoUtils.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions.shardoperationfailedexception; + +import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.protobufs.ShardFailure; +import org.opensearch.snapshots.SnapshotShardFailure; + +import java.io.IOException; + +/** + * Utility class for converting ShardOperationFailedException objects to Protocol Buffers. + */ +public class ShardOperationFailedExceptionProtoUtils { + + private ShardOperationFailedExceptionProtoUtils() { + // Utility class, no instances + } + + /** + * This method is similar to {@link org.opensearch.core.action.ShardOperationFailedException#toXContent(XContentBuilder, ToXContent.Params)} + * This method is overridden by various exception classes, which are hardcoded here. 
+ * + * @param exception The ShardOperationFailedException to convert metadata from + * @return ShardFailure + */ + public static ShardFailure toProto(ShardOperationFailedException exception) throws IOException { + if (exception instanceof ShardSearchFailure) { + return ShardSearchFailureProtoUtils.toProto((ShardSearchFailure) exception); + } else if (exception instanceof SnapshotShardFailure) { + return SnapshotShardFailureProtoUtils.toProto((SnapshotShardFailure) exception); + } else if (exception instanceof DefaultShardOperationFailedException) { + return DefaultShardOperationFailedExceptionProtoUtils.toProto((DefaultShardOperationFailedException) exception); + } else if (exception instanceof ReplicationResponse.ShardInfo.Failure) { + return ReplicationResponseShardInfoFailureProtoUtils.toProto((ReplicationResponse.ShardInfo.Failure) exception); + } else { + throw new UnsupportedOperationException( + "Unsupported ShardOperationFailedException " + exception.getClass().getName() + "cannot be converted to proto." + ); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardSearchFailureProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardSearchFailureProtoUtils.java new file mode 100644 index 0000000000000..748e6a38089b9 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardSearchFailureProtoUtils.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions.shardoperationfailedexception; + +import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.opensearchexception.OpenSearchExceptionProtoUtils; +import org.opensearch.protobufs.ShardFailure; + +import java.io.IOException; + +/** + * Utility class for converting ShardSearchFailure objects to Protocol Buffers. + */ +public class ShardSearchFailureProtoUtils { + + private ShardSearchFailureProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a ShardSearchFailure to a Protocol Buffer Struct. 
+ * Similar to {@link ShardSearchFailure#toXContent(XContentBuilder, ToXContent.Params)} * + * + * @param exception The ShardSearchFailure to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static ShardFailure toProto(ShardSearchFailure exception) throws IOException { + ShardFailure.Builder shardFailure = ShardFailure.newBuilder(); + shardFailure.setShard(exception.shardId()); + shardFailure.setIndex(exception.index()); + if (exception.shard() != null && exception.shard().getNodeId() != null) { + shardFailure.setNode(exception.shard().getNodeId()); + } + shardFailure.setReason(OpenSearchExceptionProtoUtils.generateThrowableProto(exception.getCause())); + return shardFailure.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/SnapshotShardFailureProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/SnapshotShardFailureProtoUtils.java new file mode 100644 index 0000000000000..e3419b0e974fa --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/SnapshotShardFailureProtoUtils.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions.shardoperationfailedexception; + +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.protobufs.ShardFailure; +import org.opensearch.snapshots.SnapshotShardFailure; + +/** + * Utility class for converting SnapshotShardFailure objects to Protocol Buffers. + */ +public class SnapshotShardFailureProtoUtils { + + private SnapshotShardFailureProtoUtils() { + // Utility class, no instances + } + + /** + * Converts the metadata from a SnapshotShardFailure to a Protocol Buffer Struct. 
+ * Similar to {@link SnapshotShardFailure#toXContent(XContentBuilder, ToXContent.Params)} * + * + * @param exception The SnapshotShardFailure to convert + * @return A Protocol Buffer Struct containing the exception metadata + */ + public static ShardFailure toProto(SnapshotShardFailure exception) { + ShardFailure.Builder shardFailure = ShardFailure.newBuilder(); + shardFailure.setIndex(exception.index()); + // shardFailure.setIndexUuid(exception.index()); // TODO no field called index_uuid in ShardFailure protos + shardFailure.setShard(exception.shardId()); + // shardFailure.setReason(exception.reason()); // TODO ErrorCause type in ShardFailure, not string + shardFailure.setIndex(exception.index()); + shardFailure.setNode(exception.nodeId()); + shardFailure.setStatus(exception.status().name()); + return shardFailure.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/package-info.java new file mode 100644 index 0000000000000..50076536b5be9 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains utility classes for converting shard operation failed exceptions between OpenSearch + * and Protocol Buffers formats. These utilities handle the transformation of exception details, + * error messages, and stack traces to ensure proper error reporting between the OpenSearch + * server and gRPC clients. + */ +package org.opensearch.plugin.transport.grpc.proto.response.exceptions.shardoperationfailedexception; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/HighlightFieldProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/HighlightFieldProtoUtils.java new file mode 100644 index 0000000000000..da41a124205f0 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/HighlightFieldProtoUtils.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.opensearch.core.common.text.Text; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.protobufs.StringArray; +import org.opensearch.search.fetch.subphase.highlight.HighlightField; + +/** + * Utility class for converting HighlightField objects to Protocol Buffers. + * This class handles the conversion of document get operation results to their + * Protocol Buffer representation. + */ +public class HighlightFieldProtoUtils { + + private HighlightFieldProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a HighlightField values (list of objects) to its Protocol Buffer representation. 
+ * This method is equivalent to the {@link HighlightField#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param fragments The list of HighlightField values to convert + * @return A Protobuf Value representation + */ + protected static StringArray toProto(Text[] fragments) { + StringArray.Builder stringArrayBuilder = StringArray.newBuilder(); + for (Text text : fragments) { + stringArrayBuilder.addStringArray(text.string()); + } + return stringArrayBuilder.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ProtoActionsProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ProtoActionsProtoUtils.java new file mode 100644 index 0000000000000..7c07af2a4357c --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ProtoActionsProtoUtils.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.protobufs.ResponseBody; +import org.opensearch.rest.action.RestActions; + +import java.io.IOException; + +/** + * Utility class for converting REST-like actions between OpenSearch and Protocol Buffers formats. + * This class provides methods to transform response components such as shard statistics and + * broadcast headers to ensure proper communication between the OpenSearch server and gRPC clients. 
+ */ +public class ProtoActionsProtoUtils { + + private ProtoActionsProtoUtils() { + // Utility class, no instances + } + + /** + * Similar to {@link RestActions#buildBroadcastShardsHeader(XContentBuilder, ToXContent.Params, int, int, int, int, ShardOperationFailedException[])} + * + * @param searchResponseBodyProtoBuilder the response body builder to populate with shard statistics + * @param total the total number of shards + * @param successful the number of successful shards + * @param skipped the number of skipped shards + * @param failed the number of failed shards + * @param shardFailures the array of shard operation failures + * @throws IOException if there's an error during conversion + */ + protected static void buildBroadcastShardsHeader( + ResponseBody.Builder searchResponseBodyProtoBuilder, + int total, + int successful, + int skipped, + int failed, + ShardOperationFailedException[] shardFailures + ) throws IOException { + searchResponseBodyProtoBuilder.setShards( + ShardStatisticsProtoUtils.getShardStats(total, successful, skipped, failed, shardFailures) + ); + + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtils.java new file mode 100644 index 0000000000000..2aa49201a28ad --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtils.java @@ -0,0 +1,219 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import com.google.protobuf.ByteString; +import org.apache.lucene.search.Explanation; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.plugin.transport.grpc.proto.response.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.InnerHitsResult; +import org.opensearch.protobufs.NestedIdentity; +import org.opensearch.protobufs.NullValue; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.fetch.subphase.highlight.HighlightField; +import org.opensearch.transport.RemoteClusterAware; + +import java.io.IOException; +import java.util.Map; + +/** + * Utility class for converting SearchResponse objects to Protocol Buffers. + * This class handles the conversion of search operation responses to their + * Protocol Buffer representation. + */ +public class SearchHitProtoUtils { + + private SearchHitProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a SearchHit to its Protocol Buffer representation. 
+ * This method is equivalent to {@link SearchHit#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param hit The SearchHit to convert + * @return A Protocol Buffer Hit representation + * @throws IOException if there's an error during conversion + */ + protected static org.opensearch.protobufs.Hit toProto(SearchHit hit) throws IOException { + return toInnerProto(hit); + } + + /** + * Converts a SearchHit to its Protocol Buffer representation. + * Similar to {@link SearchHit#toInnerXContent(XContentBuilder, ToXContent.Params)} + * + * @param hit The SearchHit to convert + * @return A Protocol Buffer Hit representation + * @throws IOException if there's an error during conversion + */ + protected static org.opensearch.protobufs.Hit toInnerProto(SearchHit hit) throws IOException { + org.opensearch.protobufs.Hit.Builder hitBuilder = org.opensearch.protobufs.Hit.newBuilder(); + + // For inner_hit hits shard is null and that is ok, because the parent search hit has all this information. + // Even if this was included in the inner_hit hits this would be the same, so better leave it out. + if (hit.getExplanation() != null && hit.getShard() != null) { + hitBuilder.setShard(String.valueOf(hit.getShard().getShardId().id())); + hitBuilder.setNode(hit.getShard().getNodeIdText().string()); + } + + if (hit.getIndex() != null) { + hitBuilder.setIndex(RemoteClusterAware.buildRemoteIndexName(hit.getClusterAlias(), hit.getIndex())); + } + + if (hit.getId() != null) { + hitBuilder.setId(hit.getId()); + } + + if (hit.getNestedIdentity() != null) { + hitBuilder.setNested(NestedIdentityProtoUtils.toProto(hit.getNestedIdentity())); + } + + if (hit.getVersion() != -1) { + hitBuilder.setVersion(hit.getVersion()); + } + + if (hit.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { + hitBuilder.setSeqNo(hit.getSeqNo()); + hitBuilder.setPrimaryTerm(hit.getPrimaryTerm()); + } + + if (Float.isNaN(hit.getScore())) { + hitBuilder.setScore(org.opensearch.protobufs.Hit.Score.newBuilder().setNullValue(NullValue.NULL_VALUE_NULL).build()); + } else { + hitBuilder.setScore(org.opensearch.protobufs.Hit.Score.newBuilder().setFloatValue(hit.getScore()).build()); + } + + ObjectMap.Builder objectMapBuilder = ObjectMap.newBuilder(); + for (DocumentField field : hit.getMetaFields().values()) { + // ignore empty metadata fields + if (field.getValues().isEmpty()) { + continue; + } + + objectMapBuilder.putFields(field.getName(), ObjectMapProtoUtils.toProto(field.getValues())); + } + hitBuilder.setMetaFields(objectMapBuilder.build()); + + if (hit.getSourceRef() != null) { + hitBuilder.setSource(ByteString.copyFrom(BytesReference.toBytes(hit.getSourceRef()))); + } + if (!hit.getDocumentFields().isEmpty() && + // ignore fields all together if they are all empty + hit.getDocumentFields().values().stream().anyMatch(df -> !df.getValues().isEmpty())) { + ObjectMap.Builder fieldsStructBuilder = ObjectMap.newBuilder(); + for (DocumentField field : hit.getDocumentFields().values()) { + if (!field.getValues().isEmpty()) { + fieldsStructBuilder.putFields(field.getName(), ObjectMapProtoUtils.toProto(field.getValues())); + } + } + hitBuilder.setFields(fieldsStructBuilder.build()); + } + if (hit.getHighlightFields() != null && !hit.getHighlightFields().isEmpty()) { + for (HighlightField field : hit.getHighlightFields().values()) { + hitBuilder.putHighlight(field.getName(), HighlightFieldProtoUtils.toProto(field.getFragments())); + } + } + SearchSortValuesProtoUtils.toProto(hitBuilder, hit.getSortValues()); + if (hit.getMatchedQueries().length > 
0) { + // TODO pass params in + // boolean includeMatchedQueriesScore = params.paramAsBoolean(RestSearchAction.INCLUDE_NAMED_QUERIES_SCORE_PARAM, false); + boolean includeMatchedQueriesScore = false; + if (includeMatchedQueriesScore) { + // TODO map type is missing in spec + // for (Map.Entry entry : matchedQueries.entrySet()) { + // hitBuilder.putMatchedqueires(entry.getKey(), entry.getValue()); + } else { + for (String matchedFilter : hit.getMatchedQueries()) { + hitBuilder.addMatchedQueries(matchedFilter); + } + } + } + if (hit.getExplanation() != null) { + hitBuilder.setExplanation(buildExplanation(hit.getExplanation())); + } + if (hit.getInnerHits() != null) { + for (Map.Entry entry : hit.getInnerHits().entrySet()) { + hitBuilder.putInnerHits( + entry.getKey(), + InnerHitsResult.newBuilder().setHits(SearchHitsProtoUtils.toProto(entry.getValue())).build() + ); + } + } + return hitBuilder.build(); + } + + private static org.opensearch.protobufs.Explanation buildExplanation(org.apache.lucene.search.Explanation explanation) + throws IOException { + org.opensearch.protobufs.Explanation.Builder protoExplanationBuilder = org.opensearch.protobufs.Explanation.newBuilder(); + protoExplanationBuilder.setValue(explanation.getValue().doubleValue()); + protoExplanationBuilder.setDescription(explanation.getDescription()); + + org.apache.lucene.search.Explanation[] innerExps = explanation.getDetails(); + if (innerExps != null) { + for (Explanation exp : innerExps) { + protoExplanationBuilder.addDetails(buildExplanation(exp)); + } + } + return protoExplanationBuilder.build(); + } + + /** + * Utility class for converting NestedIdentity components between OpenSearch and Protocol Buffers formats. + * This class handles the transformation of nested document identity information to ensure proper + * representation of nested search hits. + */ + protected static class NestedIdentityProtoUtils { + /** + * Private constructor to prevent instantiation. + * This is a utility class with only static methods. + */ + private NestedIdentityProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a SearchHit.NestedIdentity to its Protocol Buffer representation. + * Similar to {@link SearchHit.NestedIdentity#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param nestedIdentity The NestedIdentity to convert + * @return A Protocol Buffer NestedIdentity representation + */ + protected static NestedIdentity toProto(SearchHit.NestedIdentity nestedIdentity) { + return innerToProto(nestedIdentity); + } + + /** + * Converts a SearchHit.NestedIdentity to its Protocol Buffer representation. 
+ * Similar to {@link SearchHit.NestedIdentity#innerToXContent(XContentBuilder, ToXContent.Params)} + * + * @param nestedIdentity The NestedIdentity to convert + * @return A Protocol Buffer NestedIdentity representation + */ + protected static NestedIdentity innerToProto(SearchHit.NestedIdentity nestedIdentity) { + NestedIdentity.Builder nestedIdentityBuilder = NestedIdentity.newBuilder(); + if (nestedIdentity.getField() != null) { + nestedIdentityBuilder.setField(nestedIdentity.getField().string()); + } + if (nestedIdentity.getOffset() != -1) { + nestedIdentityBuilder.setOffset(nestedIdentity.getOffset()); + } + if (nestedIdentity.getChild() != null) { + nestedIdentityBuilder.setNested(toProto(nestedIdentity.getChild())); + } + + return nestedIdentityBuilder.build(); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitsProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitsProtoUtils.java new file mode 100644 index 0000000000000..a7e4e52d98bd5 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitsProtoUtils.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.protobufs.NullValue; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; + +import java.io.IOException; + +/** + * Utility class for converting SearchHits objects to Protocol Buffers. + * This class handles the conversion of search operation responses to their + * Protocol Buffer representation. + */ +public class SearchHitsProtoUtils { + + private SearchHitsProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a SearchHits to its Protocol Buffer representation. + * This method is equivalent to {@link SearchHits#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param hits The SearchHits to convert + * @return A Protocol Buffer HitsMetadata representation + * @throws IOException if there's an error during conversion + */ + protected static org.opensearch.protobufs.HitsMetadata toProto(SearchHits hits) throws IOException { + + org.opensearch.protobufs.HitsMetadata.Builder hitsMetaData = org.opensearch.protobufs.HitsMetadata.newBuilder(); + + org.opensearch.protobufs.HitsMetadata.Total.Builder totalBuilder = org.opensearch.protobufs.HitsMetadata.Total.newBuilder(); + + // TODO need to pass parameters + // boolean totalHitAsInt = params.paramAsBoolean(RestSearchAction.TOTAL_HITS_AS_INT_PARAM, false); + boolean totalHitAsInt = false; + if (totalHitAsInt) { + long total = hits.getTotalHits() == null ? -1 : hits.getTotalHits().value(); + totalBuilder.setDoubleValue(total); + } else if (hits.getTotalHits() != null) { + org.opensearch.protobufs.TotalHits.Builder totalHitsBuilder = org.opensearch.protobufs.TotalHits.newBuilder(); + totalHitsBuilder.setValue(hits.getTotalHits().value()); + totalHitsBuilder.setRelation( + hits.getTotalHits().relation() == TotalHits.Relation.EQUAL_TO + ? 
org.opensearch.protobufs.TotalHits.TotalHitsRelation.TOTAL_HITS_RELATION_EQ + : org.opensearch.protobufs.TotalHits.TotalHitsRelation.TOTAL_HITS_RELATION_GTE + ); + totalBuilder.setTotalHits(totalHitsBuilder.build()); + } + + hitsMetaData.setTotal(totalBuilder.build()); + + org.opensearch.protobufs.HitsMetadata.MaxScore.Builder maxScoreBuilder = org.opensearch.protobufs.HitsMetadata.MaxScore + .newBuilder(); + if (Float.isNaN(hits.getMaxScore())) { + hitsMetaData.setMaxScore(maxScoreBuilder.setNullValue(NullValue.NULL_VALUE_NULL).build()).build(); + } else { + hitsMetaData.setMaxScore(maxScoreBuilder.setFloatValue(hits.getMaxScore()).build()).build(); + } + for (SearchHit h : hits) { + hitsMetaData.addHits(SearchHitProtoUtils.toProto(h)); + } + + return hitsMetaData.build(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseProtoUtils.java new file mode 100644 index 0000000000000..72fcbf2ef8737 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseProtoUtils.java @@ -0,0 +1,186 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.protobufs.ClusterStatistics; + +import java.io.IOException; + +/** + * Utility class for converting SearchResponse objects to Protocol Buffers. + * This class handles the conversion of search operation responses to their + * Protocol Buffer representation. + */ +public class SearchResponseProtoUtils { + + private SearchResponseProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a SearchResponse to its Protocol Buffer representation. 
+ * This method is equivalent to {@link SearchResponse#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param response The SearchResponse to convert + * @return A Protocol Buffer SearchResponse representation + * @throws IOException if there's an error during conversion + */ + public static org.opensearch.protobufs.SearchResponse toProto(SearchResponse response) throws IOException { + return innerToProto(response); + } + + /** + * Similar to {@link SearchResponse#innerToXContent(XContentBuilder, ToXContent.Params)} + */ + private static org.opensearch.protobufs.SearchResponse innerToProto(SearchResponse response) throws IOException { + org.opensearch.protobufs.SearchResponse.Builder searchResponseProtoBuilder = org.opensearch.protobufs.SearchResponse.newBuilder(); + org.opensearch.protobufs.ResponseBody.Builder searchResponseBodyProtoBuilder = org.opensearch.protobufs.ResponseBody.newBuilder(); + + if (response.getScrollId() != null) { + searchResponseBodyProtoBuilder.setScrollId(response.getScrollId()); + } + if (response.pointInTimeId() != null) { + searchResponseBodyProtoBuilder.setPitId(response.pointInTimeId()); + } + + searchResponseBodyProtoBuilder.setTook(response.getTook().getMillis()); + + if (response.getPhaseTook() != null) { + searchResponseBodyProtoBuilder.setPhaseTook(PhaseTookProtoUtils.toProto(response.getPhaseTook())); + } + + searchResponseBodyProtoBuilder.setTimedOut(response.isTimedOut()); + + if (response.isTerminatedEarly() != null) { + searchResponseBodyProtoBuilder.setTerminatedEarly(response.isTerminatedEarly()); + } + if (response.getNumReducePhases() != 1) { + searchResponseBodyProtoBuilder.setNumReducePhases(response.getNumReducePhases()); + } + + ProtoActionsProtoUtils.buildBroadcastShardsHeader( + searchResponseBodyProtoBuilder, + response.getTotalShards(), + response.getSuccessfulShards(), + response.getSkippedShards(), + response.getFailedShards(), + response.getShardFailures() + ); + + ClustersProtoUtils.toProto(searchResponseBodyProtoBuilder, response.getClusters()); + SearchResponseSectionsProtoUtils.toProto(searchResponseBodyProtoBuilder, response); + + searchResponseProtoBuilder.setResponseBody(searchResponseBodyProtoBuilder.build()); + + return searchResponseProtoBuilder.build(); + } + + /** + * Utility class for converting PhaseTook components between OpenSearch and Protocol Buffers formats. + * This class handles the transformation of phase timing information to ensure proper reporting + * of search phase execution times. + */ + protected static class PhaseTookProtoUtils { + /** + * Private constructor to prevent instantiation. + * This is a utility class with only static methods. 
+ */ + private PhaseTookProtoUtils() { + // Utility class, no instances + } + + /** + * Similar to {@link SearchResponse.PhaseTook#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param phaseTook + * @return + */ + protected static org.opensearch.protobufs.PhaseTook toProto(SearchResponse.PhaseTook phaseTook) { + + org.opensearch.protobufs.PhaseTook.Builder phaseTookProtoBuilder = org.opensearch.protobufs.PhaseTook.newBuilder(); + + if (phaseTook == null) { + return phaseTookProtoBuilder.build(); + } + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + long value; + if (phaseTook.getPhaseTookMap().containsKey(searchPhaseName.getName())) { + value = phaseTook.getPhaseTookMap().get(searchPhaseName.getName()); + } else { + value = 0; + } + + switch (searchPhaseName) { + case DFS_PRE_QUERY: + phaseTookProtoBuilder.setDfsPreQuery(value); + break; + case QUERY: + phaseTookProtoBuilder.setQuery(value); + break; + case FETCH: + phaseTookProtoBuilder.setFetch(value); + break; + case DFS_QUERY: + phaseTookProtoBuilder.setDfsQuery(value); + break; + case EXPAND: + phaseTookProtoBuilder.setExpand(value); + break; + case CAN_MATCH: + phaseTookProtoBuilder.setCanMatch(value); + break; + default: + throw new UnsupportedOperationException("searchPhaseName cannot be converted to phaseTook protobuf type"); + } + } + return phaseTookProtoBuilder.build(); + } + + } + + /** + * Utility class for converting Clusters components between OpenSearch and Protocol Buffers formats. + * This class handles the transformation of cluster statistics information to ensure proper reporting + * of cross-cluster search results. + */ + protected static class ClustersProtoUtils { + /** + * Private constructor to prevent instantiation. + * This is a utility class with only static methods. + */ + private ClustersProtoUtils() { + // Utility class, no instances + } + + /** + * Similar to {@link SearchResponse.Clusters#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param protoResponseBuilder + * @param clusters + * @throws IOException + */ + protected static void toProto(org.opensearch.protobufs.ResponseBody.Builder protoResponseBuilder, SearchResponse.Clusters clusters) + throws IOException { + + if (clusters.getTotal() > 0) { + ClusterStatistics.Builder clusterStatistics = ClusterStatistics.newBuilder(); + clusterStatistics.setTotal(clusters.getTotal()); + clusterStatistics.setSuccessful(clusters.getSuccessful()); + clusterStatistics.setSkipped(clusters.getSkipped()); + + protoResponseBuilder.setClusters(clusterStatistics.build()); + } + + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseSectionsProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseSectionsProtoUtils.java new file mode 100644 index 0000000000000..85a3f582a65c4 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseSectionsProtoUtils.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchResponseSections; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Utility class for converting SearchResponse objects to Protocol Buffers. + * This class handles the conversion of search operation responses to their + * Protocol Buffer representation. + */ +public class SearchResponseSectionsProtoUtils { + + private SearchResponseSectionsProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a SearchResponse to its Protocol Buffer representation. + * Similar to {@link SearchResponseSections#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param builder The Protocol Buffer SearchResponse builder to populate + * @param response The SearchResponse to convert + * @return The populated Protocol Buffer SearchResponse builder + * @throws IOException if there's an error during conversion + */ + protected static org.opensearch.protobufs.ResponseBody.Builder toProto( + org.opensearch.protobufs.ResponseBody.Builder builder, + SearchResponse response + ) throws IOException { + builder.setHits(SearchHitsProtoUtils.toProto(response.getHits())); + + // TODO: Implement aggregations conversion + if (response.getAggregations() != null) { + throw new UnsupportedOperationException("aggregation responses are not supported yet"); + } + + // TODO: Implement suggest conversion + if (response.getSuggest() != null) { + throw new UnsupportedOperationException("suggest responses are not supported yet"); + } + + // TODO: Implement profile results conversion + if (response.getProfileResults() != null && !response.getProfileResults().isEmpty()) { + throw new UnsupportedOperationException("profile results are not supported yet"); + } + + // TODO: Implement search ext builders conversion + if (response.getInternalResponse().getSearchExtBuilders() != null + && !response.getInternalResponse().getSearchExtBuilders().isEmpty()) { + throw new UnsupportedOperationException("ext builder responses are not supported yet"); + } + + return builder; + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchSortValuesProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchSortValuesProtoUtils.java new file mode 100644 index 0000000000000..7f0d88da2808d --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchSortValuesProtoUtils.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.common.FieldValueProtoUtils; +import org.opensearch.protobufs.Hit; +import org.opensearch.search.SearchSortValues; + +/** + * Utility class for converting SearchSortVaues objects to Protocol Buffers. + * This class handles the conversion of document get operation results to their + * Protocol Buffer representation. 
+ */ +public class SearchSortValuesProtoUtils { + + private SearchSortValuesProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a SearchSortVaues values (list of objects) to its Protocol Buffer representation. + * This method is equivalent to the {@link SearchSortValues#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param hitBuilder the Hit builder to populate with sort values + * @param sortValues the array of sort values to convert + */ + + protected static void toProto(Hit.Builder hitBuilder, Object[] sortValues) { + for (Object sortValue : sortValues) { + hitBuilder.addSort(FieldValueProtoUtils.toProto(sortValue)); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ShardStatisticsProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ShardStatisticsProtoUtils.java new file mode 100644 index 0000000000000..74a4c006c4b9c --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ShardStatisticsProtoUtils.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.shardoperationfailedexception.ShardOperationFailedExceptionProtoUtils; +import org.opensearch.protobufs.ShardStatistics; + +import java.io.IOException; + +/** + * Utility class for converting ShardStatistics objects to Protocol Buffers. + * This class handles the conversion of search operation responses to their + * Protocol Buffer representation. + */ +public class ShardStatisticsProtoUtils { + + private ShardStatisticsProtoUtils() { + // Utility class, no instances + } + + /** + * Converts shard statistics information to its Protocol Buffer representation. 
+ * This method is equivalent to {@link ShardStats#toXContent(XContentBuilder, ToXContent.Params)} + * + * @param total the total number of shards + * @param successful the number of successful shards + * @param skipped the number of skipped shards + * @param failed the number of failed shards + * @param shardFailures the array of shard operation failures + * @return A Protocol Buffer ShardStatistics representation + * @throws IOException if there's an error during conversion + */ + protected static ShardStatistics getShardStats( + int total, + int successful, + int skipped, + int failed, + ShardOperationFailedException[] shardFailures + ) throws IOException { + ShardStatistics.Builder shardStats = ShardStatistics.newBuilder(); + shardStats.setTotal(total); + shardStats.setSuccessful(successful); + if (skipped >= 0) { + shardStats.setSkipped(skipped); + } + shardStats.setFailed(failed); + if (CollectionUtils.isEmpty(shardFailures) == false) { + for (ShardOperationFailedException shardFailure : ExceptionsHelper.groupBy(shardFailures)) { + shardStats.addFailures(ShardOperationFailedExceptionProtoUtils.toProto(shardFailure)); + } + } + return shardStats.build(); + } + +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/package-info.java new file mode 100644 index 0000000000000..6c122d098a73a --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains utility classes for converting search response components between OpenSearch + * and Protocol Buffers formats. These utilities handle the transformation of search results, + * hits, aggregations, and other response elements to ensure proper communication between the OpenSearch + * server and gRPC clients. + */ +package org.opensearch.plugin.transport.grpc.proto.response.search; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/SearchServiceImpl.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/SearchServiceImpl.java new file mode 100644 index 0000000000000..65f55d042ea20 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/services/SearchServiceImpl.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.services; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.plugin.transport.grpc.listeners.SearchRequestActionListener; +import org.opensearch.plugin.transport.grpc.proto.request.search.SearchRequestProtoUtils; +import org.opensearch.protobufs.services.SearchServiceGrpc; +import org.opensearch.transport.client.Client; + +import java.io.IOException; + +import io.grpc.stub.StreamObserver; + +/** + * Implementation of the gRPC SearchService. 
+ * This class handles incoming gRPC search requests, converts them to OpenSearch search requests, + * executes them using the provided client, and returns the results back to the gRPC client. + */ +public class SearchServiceImpl extends SearchServiceGrpc.SearchServiceImplBase { + private static final Logger logger = LogManager.getLogger(SearchServiceImpl.class); + private final Client client; + + /** + * Creates a new SearchServiceImpl. + * + * @param client: Client for executing actions on the local node + */ + public SearchServiceImpl(Client client) { + + this.client = client; + } + + /** + * Processes a search request. + * + * @param request The search request to process + * @param responseObserver The observer to send the response back to the client + */ + @Override + public void search( + org.opensearch.protobufs.SearchRequest request, + StreamObserver responseObserver + ) { + + try { + org.opensearch.action.search.SearchRequest searchRequest = SearchRequestProtoUtils.prepareRequest(request, client); + SearchRequestActionListener listener = new SearchRequestActionListener(responseObserver); + client.search(searchRequest, listener); + } catch (RuntimeException | IOException e) { + logger.error("SearchServiceImpl failed to process search request, request=" + request + ", error=" + e.getMessage()); + responseObserver.onError(e); + } + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/listeners/SearchRequestActionListenerTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/listeners/SearchRequestActionListenerTests.java new file mode 100644 index 0000000000000..1b0d50d8668a1 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/listeners/SearchRequestActionListenerTests.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.plugin.transport.grpc.listeners; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchResponseSections; +import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.search.SearchHits; +import org.opensearch.test.OpenSearchTestCase; + +import io.grpc.stub.StreamObserver; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class SearchRequestActionListenerTests extends OpenSearchTestCase { + + @Mock + private StreamObserver responseObserver; + + private SearchRequestActionListener listener; + + @Override + public void setUp() throws Exception { + super.setUp(); + MockitoAnnotations.openMocks(this); + listener = new SearchRequestActionListener(responseObserver); + } + + public void testOnResponse() { + + // Create a SearchResponse + SearchResponse mockSearchResponse = new SearchResponse( + new SearchResponseSections(SearchHits.empty(), null, null, false, false, null, 1), + randomAlphaOfLengthBetween(5, 10), + 5, + 5, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + + // Call the method under test + listener.onResponse(mockSearchResponse); + + // Verify that onNext and onCompleted were called + verify(responseObserver, times(1)).onNext(any(org.opensearch.protobufs.SearchResponse.class)); + verify(responseObserver, times(1)).onCompleted(); + } + + public void testOnFailure() { + // Create a mock StreamObserver + @SuppressWarnings("unchecked") + StreamObserver mockResponseObserver = mock(StreamObserver.class); + + // Create a SearchRequestActionListener + SearchRequestActionListener listener = new SearchRequestActionListener(mockResponseObserver); + + // Create an exception + Exception exception = new Exception("Test exception"); + + // Call the method under test + listener.onFailure(exception); + + // Verify that onError was called with the exception + verify(mockResponseObserver, times(1)).onError(exception); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtilsTests.java index 1fc4d26101eab..5e6726c65b5d3 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtilsTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/FetchSourceContextProtoUtilsTests.java @@ -10,6 +10,7 @@ import org.opensearch.core.common.Strings; import org.opensearch.protobufs.BulkRequest; +import org.opensearch.protobufs.SearchRequest; import org.opensearch.protobufs.SourceConfig; import org.opensearch.protobufs.SourceConfigParam; import org.opensearch.protobufs.SourceFilter; @@ -177,4 +178,181 @@ public void testFromProtoWithFilterExcludes() { assertArrayEquals("includes should be empty", Strings.EMPTY_ARRAY, context.includes()); assertArrayEquals("excludes should match", new String[] { "field1", "field2" }, context.excludes()); } + + public void testParseFromProtoRequestWithSearchRequestBoolValue() { + // Create a SearchRequest with source as boolean + SearchRequest request = 
SearchRequest.newBuilder().setSource(SourceConfigParam.newBuilder().setBoolValue(true).build()).build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should be empty", Strings.EMPTY_ARRAY, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testParseFromProtoRequestWithSearchRequestStringArray() { + // Create a SearchRequest with source as string array + SearchRequest request = SearchRequest.newBuilder() + .setSource( + SourceConfigParam.newBuilder() + .setStringArray(StringArray.newBuilder().addStringArray("field1").addStringArray("field2").build()) + .build() + ) + .build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "field1", "field2" }, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testParseFromProtoRequestWithSearchRequestSourceIncludes() { + // Create a SearchRequest with source includes + SearchRequest request = SearchRequest.newBuilder().addSourceIncludes("field1").addSourceIncludes("field2").build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "field1", "field2" }, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testParseFromProtoRequestWithSearchRequestSourceExcludes() { + // Create a SearchRequest with source excludes + SearchRequest request = SearchRequest.newBuilder().addSourceExcludes("field1").addSourceExcludes("field2").build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should be empty", Strings.EMPTY_ARRAY, context.includes()); + assertArrayEquals("excludes should match", new String[] { "field1", "field2" }, context.excludes()); + } + + public void testParseFromProtoRequestWithSearchRequestBothIncludesAndExcludes() { + // Create a SearchRequest with both source includes and excludes + SearchRequest request = SearchRequest.newBuilder() + .addSourceIncludes("include1") + .addSourceIncludes("include2") + .addSourceExcludes("exclude1") + .addSourceExcludes("exclude2") + .build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "include1", "include2" }, context.includes()); + assertArrayEquals("excludes should match", 
new String[] { "exclude1", "exclude2" }, context.excludes()); + } + + public void testParseFromProtoRequestWithSearchRequestNoSourceParams() { + // Create a SearchRequest with no source parameters + SearchRequest request = SearchRequest.newBuilder().build(); + + // Parse the request + FetchSourceContext context = FetchSourceContextProtoUtils.parseFromProtoRequest(request); + + // Verify the result + assertNull("Context should be null", context); + } + + public void testFromProtoWithSourceConfigFetch() { + // Create a SourceConfig with fetch=false + SourceConfig sourceConfig = SourceConfig.newBuilder().setFetch(false).build(); + + // Convert to FetchSourceContext + FetchSourceContext context = FetchSourceContextProtoUtils.fromProto(sourceConfig); + + // Verify the result + assertNotNull("Context should not be null", context); + assertFalse("fetchSource should be false", context.fetchSource()); + assertArrayEquals("includes should be empty", Strings.EMPTY_ARRAY, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testFromProtoWithSourceConfigIncludes() { + // Create a SourceConfig with includes + SourceConfig sourceConfig = SourceConfig.newBuilder() + .setIncludes(StringArray.newBuilder().addStringArray("field1").addStringArray("field2").build()) + .build(); + + // Convert to FetchSourceContext + FetchSourceContext context = FetchSourceContextProtoUtils.fromProto(sourceConfig); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "field1", "field2" }, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testFromProtoWithSourceConfigFilterIncludesOnly() { + // Create a SourceConfig with filter includes only + SourceConfig sourceConfig = SourceConfig.newBuilder() + .setFilter(SourceFilter.newBuilder().addIncludes("field1").addIncludes("field2").build()) + .build(); + + // Convert to FetchSourceContext + FetchSourceContext context = FetchSourceContextProtoUtils.fromProto(sourceConfig); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "field1", "field2" }, context.includes()); + assertArrayEquals("excludes should be empty", Strings.EMPTY_ARRAY, context.excludes()); + } + + public void testFromProtoWithSourceConfigFilterExcludesOnly() { + // Create a SourceConfig with filter excludes only + SourceConfig sourceConfig = SourceConfig.newBuilder() + .setFilter(SourceFilter.newBuilder().addExcludes("field1").addExcludes("field2").build()) + .build(); + + // Convert to FetchSourceContext + FetchSourceContext context = FetchSourceContextProtoUtils.fromProto(sourceConfig); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should be empty", Strings.EMPTY_ARRAY, context.includes()); + assertArrayEquals("excludes should match", new String[] { "field1", "field2" }, context.excludes()); + } + + public void testFromProtoWithSourceConfigFilterBothIncludesAndExcludes() { + // Create a SourceConfig with filter includes and excludes + SourceConfig sourceConfig = SourceConfig.newBuilder() + .setFilter( + 
SourceFilter.newBuilder() + .addIncludes("include1") + .addIncludes("include2") + .addExcludes("exclude1") + .addExcludes("exclude2") + .build() + ) + .build(); + + // Convert to FetchSourceContext + FetchSourceContext context = FetchSourceContextProtoUtils.fromProto(sourceConfig); + + // Verify the result + assertNotNull("Context should not be null", context); + assertTrue("fetchSource should be true", context.fetchSource()); + assertArrayEquals("includes should match", new String[] { "include1", "include2" }, context.includes()); + assertArrayEquals("excludes should match", new String[] { "exclude1", "exclude2" }, context.excludes()); + } } diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtilsTests.java index cc5be18ea9942..1a4a2328ff1e0 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtilsTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/ScriptProtoUtilsTests.java @@ -20,6 +20,8 @@ import java.util.HashMap; import java.util.Map; +import static org.opensearch.script.Script.DEFAULT_SCRIPT_LANG; + public class ScriptProtoUtilsTests extends OpenSearchTestCase { public void testParseFromProtoRequestWithInlineScript() { @@ -267,7 +269,6 @@ public void testParseScriptLanguageWithUnspecifiedLanguage() { ) .build(); - // Parse the protobuf Script, should throw UnsupportedOperationException - expectThrows(UnsupportedOperationException.class, () -> ScriptProtoUtils.parseFromProtoRequest(protoScript)); + assertEquals("uses default language", DEFAULT_SCRIPT_LANG, ScriptProtoUtils.parseFromProtoRequest(protoScript).getLang()); } } diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/CollapseBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/CollapseBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..79f0c1bb553a4 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/CollapseBuilderProtoUtilsTests.java @@ -0,0 +1,106 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.protobufs.FieldCollapse; +import org.opensearch.protobufs.InnerHits; +import org.opensearch.search.collapse.CollapseBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class CollapseBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithBasicField() throws IOException { + // Create a protobuf FieldCollapse with just a field name + FieldCollapse fieldCollapse = FieldCollapse.newBuilder().setField("user_id").build(); + + // Call the method under test + CollapseBuilder collapseBuilder = CollapseBuilderProtoUtils.fromProto(fieldCollapse); + + // Verify the result + assertNotNull("CollapseBuilder should not be null", collapseBuilder); + assertEquals("Field name should match", "user_id", collapseBuilder.getField()); + assertEquals("MaxConcurrentGroupRequests should be default", 0, collapseBuilder.getMaxConcurrentGroupRequests()); + assertEquals("InnerHits should be empty", 0, collapseBuilder.getInnerHits().size()); + } + + public void testFromProtoWithMaxConcurrentGroupSearches() throws IOException { + // Create a protobuf FieldCollapse with maxConcurrentGroupSearches + FieldCollapse fieldCollapse = FieldCollapse.newBuilder().setField("user_id").setMaxConcurrentGroupSearches(10).build(); + + // Call the method under test + CollapseBuilder collapseBuilder = CollapseBuilderProtoUtils.fromProto(fieldCollapse); + + // Verify the result + assertNotNull("CollapseBuilder should not be null", collapseBuilder); + assertEquals("Field name should match", "user_id", collapseBuilder.getField()); + assertEquals("MaxConcurrentGroupRequests should match", 10, collapseBuilder.getMaxConcurrentGroupRequests()); + assertEquals("InnerHits should be empty", 0, collapseBuilder.getInnerHits().size()); + } + + public void testFromProtoWithInnerHits() throws IOException { + // Create a protobuf FieldCollapse with inner hits + FieldCollapse fieldCollapse = FieldCollapse.newBuilder() + .setField("user_id") + .addInnerHits(InnerHits.newBuilder().setName("last_tweet").setSize(5).build()) + .build(); + + // Call the method under test + CollapseBuilder collapseBuilder = CollapseBuilderProtoUtils.fromProto(fieldCollapse); + + // Verify the result + assertNotNull("CollapseBuilder should not be null", collapseBuilder); + assertEquals("Field name should match", "user_id", collapseBuilder.getField()); + assertNotNull("InnerHits should not be null", collapseBuilder.getInnerHits()); + assertEquals("InnerHits name should match", "last_tweet", collapseBuilder.getInnerHits().get(0).getName()); + assertEquals("InnerHits size should match", 5, collapseBuilder.getInnerHits().get(0).getSize()); + } + + public void testFromProtoWithMultipleInnerHits() throws IOException { + // Create a protobuf FieldCollapse with multiple inner hits + FieldCollapse fieldCollapse = FieldCollapse.newBuilder() + .setField("user_id") + .addInnerHits(InnerHits.newBuilder().setName("first_inner_hit").setSize(5).build()) + .addInnerHits(InnerHits.newBuilder().setName("second_inner_hit").setSize(10).build()) + .build(); + + // Call the method under test + CollapseBuilder collapseBuilder = CollapseBuilderProtoUtils.fromProto(fieldCollapse); + + // Verify the result + assertNotNull("CollapseBuilder should not be null", collapseBuilder); + assertEquals("Field name should match", "user_id", collapseBuilder.getField()); + assertNotNull("InnerHits should not be null", 
collapseBuilder.getInnerHits()); + // The last inner hit in the list should be used + assertEquals("InnerHits name should match the last inner hit", "second_inner_hit", collapseBuilder.getInnerHits().get(0).getName()); + assertEquals("InnerHits size should match the last inner hit", 10, collapseBuilder.getInnerHits().get(0).getSize()); + } + + public void testFromProtoWithAllFields() throws IOException { + // Create a protobuf FieldCollapse with all fields + FieldCollapse fieldCollapse = FieldCollapse.newBuilder() + .setField("user_id") + .setMaxConcurrentGroupSearches(10) + .addInnerHits(InnerHits.newBuilder().setName("last_tweet").setSize(5).build()) + .build(); + + // Call the method under test + CollapseBuilder collapseBuilder = CollapseBuilderProtoUtils.fromProto(fieldCollapse); + + // Verify the result + assertNotNull("CollapseBuilder should not be null", collapseBuilder); + assertEquals("Field name should match", "user_id", collapseBuilder.getField()); + assertEquals("MaxConcurrentGroupRequests should match", 10, collapseBuilder.getMaxConcurrentGroupRequests()); + assertNotNull("InnerHits should not be null", collapseBuilder.getInnerHits()); + assertEquals("InnerHits name should match", "last_tweet", collapseBuilder.getInnerHits().get(0).getName()); + assertEquals("InnerHits size should match", 5, collapseBuilder.getInnerHits().get(0).getSize()); + } + +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/IndicesOptionsProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/IndicesOptionsProtoUtilsTests.java new file mode 100644 index 0000000000000..e18b04d601040 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/IndicesOptionsProtoUtilsTests.java @@ -0,0 +1,292 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.protobufs.SearchRequest; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; + +import static org.opensearch.action.support.IndicesOptions.WildcardStates; + +public class IndicesOptionsProtoUtilsTests extends OpenSearchTestCase { + + public void testFromRequestWithDefaultSettings() { + // Create a SearchRequest with no indices options + SearchRequest searchRequest = SearchRequest.newBuilder().build(); + + // Create default settings + IndicesOptions defaultSettings = IndicesOptions.strictExpandOpenAndForbidClosed(); + + // Call the method under test + IndicesOptions indicesOptions = IndicesOptionsProtoUtils.fromRequest(searchRequest, defaultSettings); + + // Verify the result + assertNotNull("IndicesOptions should not be null", indicesOptions); + assertEquals("Should return default settings", defaultSettings, indicesOptions); + } + + public void testFromRequestWithCustomSettings() { + // Create a SearchRequest with custom indices options + SearchRequest searchRequest = SearchRequest.newBuilder() + .setIgnoreUnavailable(true) + .setAllowNoIndices(false) + .setIgnoreThrottled(true) + .addExpandWildcards(SearchRequest.ExpandWildcard.EXPAND_WILDCARD_OPEN) + .addExpandWildcards(SearchRequest.ExpandWildcard.EXPAND_WILDCARD_CLOSED) + .build(); + + // Create default settings + IndicesOptions defaultSettings = IndicesOptions.strictExpandOpenAndForbidClosed(); + + // Call the method under test + IndicesOptions indicesOptions = IndicesOptionsProtoUtils.fromRequest(searchRequest, defaultSettings); + + // Verify the result + assertNotNull("IndicesOptions should not be null", indicesOptions); + assertTrue("Should ignore unavailable", indicesOptions.ignoreUnavailable()); + assertFalse("Should not allow no indices", indicesOptions.allowNoIndices()); + assertTrue("Should ignore throttled", indicesOptions.ignoreThrottled()); + assertTrue("Should expand open", indicesOptions.expandWildcardsOpen()); + assertTrue("Should expand closed", indicesOptions.expandWildcardsClosed()); + assertFalse("Should not expand hidden", indicesOptions.expandWildcardsHidden()); + } + + public void testFromProtoParametersWithPartialSettings() { + // Create a SearchRequest with partial indices options + SearchRequest searchRequest = SearchRequest.newBuilder() + .setIgnoreUnavailable(true) + // allowNoIndices not set + .setIgnoreThrottled(true) + .addExpandWildcards(SearchRequest.ExpandWildcard.EXPAND_WILDCARD_OPEN) + .build(); + + // Create default settings + IndicesOptions defaultSettings = IndicesOptions.strictExpandOpenAndForbidClosed(); + + // Call the method under test + IndicesOptions indicesOptions = IndicesOptionsProtoUtils.fromProtoParameters(searchRequest, defaultSettings); + + // Verify the result + assertNotNull("IndicesOptions should not be null", indicesOptions); + assertTrue("Should ignore unavailable", indicesOptions.ignoreUnavailable()); + assertEquals("Should use default for allowNoIndices", defaultSettings.allowNoIndices(), indicesOptions.allowNoIndices()); + assertTrue("Should ignore throttled", indicesOptions.ignoreThrottled()); + assertTrue("Should expand open", indicesOptions.expandWildcardsOpen()); + assertFalse("Should not expand closed", indicesOptions.expandWildcardsClosed()); + assertFalse("Should not expand hidden", 
indicesOptions.expandWildcardsHidden()); + } + + public void testParseProtoParameterWithEmptyList() { + // Create an empty list of ExpandWildcard + List wildcardList = Collections.emptyList(); + + // Create default states + EnumSet defaultStates = EnumSet.of(WildcardStates.OPEN); + + // Call the method under test + EnumSet states = IndicesOptionsProtoUtils.parseProtoParameter(wildcardList, defaultStates); + + // Verify the result + assertNotNull("States should not be null", states); + assertEquals("Should return default states", defaultStates, states); + } + + public void testParseProtoParameterWithSingleValue() { + // Create a list with a single ExpandWildcard + List wildcardList = Collections.singletonList(SearchRequest.ExpandWildcard.EXPAND_WILDCARD_CLOSED); + + // Create default states + EnumSet defaultStates = EnumSet.of(WildcardStates.OPEN); + + // Call the method under test + EnumSet states = IndicesOptionsProtoUtils.parseProtoParameter(wildcardList, defaultStates); + + // Verify the result + assertNotNull("States should not be null", states); + assertEquals("Should have 1 state", 1, states.size()); + assertTrue("Should contain CLOSED", states.contains(WildcardStates.CLOSED)); + assertFalse("Should not contain OPEN", states.contains(WildcardStates.OPEN)); + } + + public void testParseProtoParameterWithMultipleValues() { + // Create a list with multiple ExpandWildcard values + List wildcardList = Arrays.asList( + SearchRequest.ExpandWildcard.EXPAND_WILDCARD_OPEN, + SearchRequest.ExpandWildcard.EXPAND_WILDCARD_HIDDEN + ); + + // Create default states + EnumSet defaultStates = EnumSet.of(WildcardStates.CLOSED); + + // Call the method under test + EnumSet states = IndicesOptionsProtoUtils.parseProtoParameter(wildcardList, defaultStates); + + // Verify the result + assertNotNull("States should not be null", states); + assertEquals("Should have 2 states", 2, states.size()); + assertTrue("Should contain OPEN", states.contains(WildcardStates.OPEN)); + assertTrue("Should contain HIDDEN", states.contains(WildcardStates.HIDDEN)); + assertFalse("Should not contain CLOSED", states.contains(WildcardStates.CLOSED)); + } + + public void testParseProtoParameterWithNoneValue() { + // Create a list with NONE value + List wildcardList = Collections.singletonList(SearchRequest.ExpandWildcard.EXPAND_WILDCARD_NONE); + + // Create default states with all values + EnumSet defaultStates = EnumSet.allOf(WildcardStates.class); + + // Call the method under test + EnumSet states = IndicesOptionsProtoUtils.parseProtoParameter(wildcardList, defaultStates); + + // Verify the result + assertNotNull("States should not be null", states); + assertTrue("Should be empty", states.isEmpty()); + } + + public void testParseProtoParameterWithAllValue() { + // Create a list with ALL value + List wildcardList = Collections.singletonList(SearchRequest.ExpandWildcard.EXPAND_WILDCARD_ALL); + + // Create default states with no values + EnumSet defaultStates = EnumSet.noneOf(WildcardStates.class); + + // Call the method under test + EnumSet states = IndicesOptionsProtoUtils.parseProtoParameter(wildcardList, defaultStates); + + // Verify the result + assertNotNull("States should not be null", states); + assertEquals("Should have all states", EnumSet.allOf(WildcardStates.class), states); + } + + public void testParseProtoParameterWithNoneFollowedByValues() { + // Create a list with NONE followed by other values + List wildcardList = Arrays.asList( + SearchRequest.ExpandWildcard.EXPAND_WILDCARD_NONE, + 
SearchRequest.ExpandWildcard.EXPAND_WILDCARD_OPEN, + SearchRequest.ExpandWildcard.EXPAND_WILDCARD_CLOSED + ); + + // Create default states + EnumSet defaultStates = EnumSet.of(WildcardStates.HIDDEN); + + // Call the method under test + EnumSet states = IndicesOptionsProtoUtils.parseProtoParameter(wildcardList, defaultStates); + + // Verify the result + assertNotNull("States should not be null", states); + assertEquals("Should have 2 states", 2, states.size()); + assertTrue("Should contain OPEN", states.contains(WildcardStates.OPEN)); + assertTrue("Should contain CLOSED", states.contains(WildcardStates.CLOSED)); + assertFalse("Should not contain HIDDEN", states.contains(WildcardStates.HIDDEN)); + } + + public void testParseProtoParameterWithValuesFollowedByNone() { + // Create a list with values followed by NONE + List wildcardList = Arrays.asList( + SearchRequest.ExpandWildcard.EXPAND_WILDCARD_OPEN, + SearchRequest.ExpandWildcard.EXPAND_WILDCARD_CLOSED, + SearchRequest.ExpandWildcard.EXPAND_WILDCARD_NONE + ); + + // Create default states + EnumSet defaultStates = EnumSet.of(WildcardStates.HIDDEN); + + // Call the method under test + EnumSet states = IndicesOptionsProtoUtils.parseProtoParameter(wildcardList, defaultStates); + + // Verify the result + assertNotNull("States should not be null", states); + assertTrue("Should be empty", states.isEmpty()); + } + + public void testUpdateSetForValueWithOpen() { + // Create an empty EnumSet + EnumSet states = EnumSet.noneOf(WildcardStates.class); + + // Call the method under test + IndicesOptionsProtoUtils.updateSetForValue(states, SearchRequest.ExpandWildcard.EXPAND_WILDCARD_OPEN); + + // Verify the result + assertNotNull("States should not be null", states); + assertEquals("Should have 1 state", 1, states.size()); + assertTrue("Should contain OPEN", states.contains(WildcardStates.OPEN)); + } + + public void testUpdateSetForValueWithClosed() { + // Create an empty EnumSet + EnumSet states = EnumSet.noneOf(WildcardStates.class); + + // Call the method under test + IndicesOptionsProtoUtils.updateSetForValue(states, SearchRequest.ExpandWildcard.EXPAND_WILDCARD_CLOSED); + + // Verify the result + assertNotNull("States should not be null", states); + assertEquals("Should have 1 state", 1, states.size()); + assertTrue("Should contain CLOSED", states.contains(WildcardStates.CLOSED)); + } + + public void testUpdateSetForValueWithHidden() { + // Create an empty EnumSet + EnumSet states = EnumSet.noneOf(WildcardStates.class); + + // Call the method under test + IndicesOptionsProtoUtils.updateSetForValue(states, SearchRequest.ExpandWildcard.EXPAND_WILDCARD_HIDDEN); + + // Verify the result + assertNotNull("States should not be null", states); + assertEquals("Should have 1 state", 1, states.size()); + assertTrue("Should contain HIDDEN", states.contains(WildcardStates.HIDDEN)); + } + + public void testUpdateSetForValueWithNone() { + // Create an EnumSet with all values + EnumSet states = EnumSet.allOf(WildcardStates.class); + + // Call the method under test + IndicesOptionsProtoUtils.updateSetForValue(states, SearchRequest.ExpandWildcard.EXPAND_WILDCARD_NONE); + + // Verify the result + assertNotNull("States should not be null", states); + assertTrue("Should be empty", states.isEmpty()); + } + + public void testUpdateSetForValueWithAll() { + // Create an empty EnumSet + EnumSet states = EnumSet.noneOf(WildcardStates.class); + + // Call the method under test + IndicesOptionsProtoUtils.updateSetForValue(states, SearchRequest.ExpandWildcard.EXPAND_WILDCARD_ALL); 
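+        // EXPAND_WILDCARD_ALL is expected to populate the set with every WildcardStates constant (see the assertion below)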
+ + // Verify the result + assertNotNull("States should not be null", states); + assertEquals("Should have all states", EnumSet.allOf(WildcardStates.class), states); + } + + public void testUpdateSetForValueWithInvalidValue() { + // Create an empty EnumSet + EnumSet states = EnumSet.noneOf(WildcardStates.class); + + // Call the method under test with UNRECOGNIZED value, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> IndicesOptionsProtoUtils.updateSetForValue(states, SearchRequest.ExpandWildcard.UNRECOGNIZED) + ); + + assertTrue( + "Exception message should mention no valid expand wildcard value", + exception.getMessage().contains("No valid expand wildcard value") + ); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/InnerHitsBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/InnerHitsBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..2b4d9064428e7 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/InnerHitsBuilderProtoUtilsTests.java @@ -0,0 +1,242 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.index.query.InnerHitBuilder; +import org.opensearch.protobufs.FieldAndFormat; +import org.opensearch.protobufs.InlineScript; +import org.opensearch.protobufs.InnerHits; +import org.opensearch.protobufs.ScriptField; +import org.opensearch.protobufs.ScriptLanguage; +import org.opensearch.protobufs.ScriptLanguage.BuiltinScriptLanguage; +import org.opensearch.protobufs.SourceConfig; +import org.opensearch.protobufs.SourceFilter; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +public class InnerHitsBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithBasicFields() throws IOException { + // Create a protobuf InnerHits with basic fields + InnerHits innerHits = InnerHits.newBuilder() + .setName("test_inner_hits") + .setIgnoreUnmapped(true) + .setFrom(10) + .setSize(20) + .setExplain(true) + .setVersion(true) + .setSeqNoPrimaryTerm(true) + .setTrackScores(true) + .build(); + + // Call the method under test + InnerHitBuilder innerHitBuilder = InnerHitsBuilderProtoUtils.fromProto(Collections.singletonList(innerHits)); + + // Verify the result + assertNotNull("InnerHitBuilder should not be null", innerHitBuilder); + assertEquals("Name should match", "test_inner_hits", innerHitBuilder.getName()); + assertTrue("IgnoreUnmapped should be true", innerHitBuilder.isIgnoreUnmapped()); + assertEquals("From should match", 10, innerHitBuilder.getFrom()); + assertEquals("Size should match", 20, innerHitBuilder.getSize()); + assertTrue("Explain should be true", innerHitBuilder.isExplain()); + assertTrue("Version should be true", innerHitBuilder.isVersion()); + assertTrue("SeqNoAndPrimaryTerm should be true", innerHitBuilder.isSeqNoAndPrimaryTerm()); + assertTrue("TrackScores should be true", innerHitBuilder.isTrackScores()); + } + + public 
void testFromProtoWithStoredFields() throws IOException { + // Create a protobuf InnerHits with stored fields + InnerHits innerHits = InnerHits.newBuilder() + .setName("test_inner_hits") + .addStoredFields("field1") + .addStoredFields("field2") + .addStoredFields("field3") + .build(); + + // Call the method under test + InnerHitBuilder innerHitBuilder = InnerHitsBuilderProtoUtils.fromProto(Collections.singletonList(innerHits)); + + // Verify the result + assertNotNull("InnerHitBuilder should not be null", innerHitBuilder); + assertNotNull("StoredFieldNames should not be null", innerHitBuilder.getStoredFieldsContext()); + assertEquals("StoredFieldNames size should match", 3, innerHitBuilder.getStoredFieldsContext().fieldNames().size()); + assertTrue("StoredFieldNames should contain field1", innerHitBuilder.getStoredFieldsContext().fieldNames().contains("field1")); + assertTrue("StoredFieldNames should contain field2", innerHitBuilder.getStoredFieldsContext().fieldNames().contains("field2")); + assertTrue("StoredFieldNames should contain field3", innerHitBuilder.getStoredFieldsContext().fieldNames().contains("field3")); + } + + public void testFromProtoWithDocValueFields() throws IOException { + // Create a protobuf InnerHits with doc value fields + InnerHits innerHits = InnerHits.newBuilder() + .setName("test_inner_hits") + .addDocvalueFields(FieldAndFormat.newBuilder().setField("field1").setFormat("format1").build()) + .addDocvalueFields(FieldAndFormat.newBuilder().setField("field2").setFormat("format2").build()) + .build(); + + // Call the method under test + InnerHitBuilder innerHitBuilder = InnerHitsBuilderProtoUtils.fromProto(Collections.singletonList(innerHits)); + + // Verify the result + assertNotNull("InnerHitBuilder should not be null", innerHitBuilder); + assertNotNull("DocValueFields should not be null", innerHitBuilder.getDocValueFields()); + assertEquals("DocValueFields size should match", 2, innerHitBuilder.getDocValueFields().size()); + + // Check field names and formats + boolean foundField1 = false; + boolean foundField2 = false; + for (org.opensearch.search.fetch.subphase.FieldAndFormat fieldAndFormat : innerHitBuilder.getDocValueFields()) { + if (fieldAndFormat.field.equals("field1")) { + assertEquals("Format should match for field1", "format1", fieldAndFormat.format); + foundField1 = true; + } else if (fieldAndFormat.field.equals("field2")) { + assertEquals("Format should match for field2", "format2", fieldAndFormat.format); + foundField2 = true; + } + } + assertTrue("Should find field1", foundField1); + assertTrue("Should find field2", foundField2); + } + + public void testFromProtoWithFetchFields() throws IOException { + // Create a protobuf InnerHits with fetch fields + InnerHits innerHits = InnerHits.newBuilder() + .setName("test_inner_hits") + .addFields(FieldAndFormat.newBuilder().setField("field1").setFormat("format1").build()) + .addFields(FieldAndFormat.newBuilder().setField("field2").setFormat("format2").build()) + .build(); + + // Call the method under test + InnerHitBuilder innerHitBuilder = InnerHitsBuilderProtoUtils.fromProto(Collections.singletonList(innerHits)); + + // Verify the result + assertNotNull("InnerHitBuilder should not be null", innerHitBuilder); + assertNotNull("FetchFields should not be null", innerHitBuilder.getFetchFields()); + assertEquals("FetchFields size should match", 2, innerHitBuilder.getFetchFields().size()); + + // Check field names and formats + boolean foundField1 = false; + boolean foundField2 = false; + for 
(org.opensearch.search.fetch.subphase.FieldAndFormat fieldAndFormat : innerHitBuilder.getFetchFields()) { + if (fieldAndFormat.field.equals("field1")) { + assertEquals("Format should match for field1", "format1", fieldAndFormat.format); + foundField1 = true; + } else if (fieldAndFormat.field.equals("field2")) { + assertEquals("Format should match for field2", "format2", fieldAndFormat.format); + foundField2 = true; + } + } + assertTrue("Should find field1", foundField1); + assertTrue("Should find field2", foundField2); + } + + public void testFromProtoWithScriptFields() throws IOException { + // Create a protobuf InnerHits with script fields + InnerHits.Builder innerHitsBuilder = InnerHits.newBuilder().setName("test_inner_hits"); + + // Create script field 1 + InlineScript inlineScript1 = InlineScript.newBuilder() + .setSource("doc['field1'].value * 2") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_PAINLESS).build()) + .build(); + org.opensearch.protobufs.Script script1 = org.opensearch.protobufs.Script.newBuilder().setInlineScript(inlineScript1).build(); + ScriptField scriptField1 = ScriptField.newBuilder().setScript(script1).setIgnoreFailure(true).build(); + innerHitsBuilder.putScriptFields("script_field1", scriptField1); + + // Create script field 2 + InlineScript inlineScript2 = InlineScript.newBuilder() + .setSource("doc['field2'].value + '_suffix'") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_PAINLESS).build()) + .build(); + org.opensearch.protobufs.Script script2 = org.opensearch.protobufs.Script.newBuilder().setInlineScript(inlineScript2).build(); + ScriptField scriptField2 = ScriptField.newBuilder().setScript(script2).build(); + innerHitsBuilder.putScriptFields("script_field2", scriptField2); + + InnerHits innerHits = innerHitsBuilder.build(); + + // Call the method under test + InnerHitBuilder innerHitBuilder = InnerHitsBuilderProtoUtils.fromProto(Collections.singletonList(innerHits)); + + // Verify the result + assertNotNull("InnerHitBuilder should not be null", innerHitBuilder); + Set scriptFields = innerHitBuilder.getScriptFields(); + assertNotNull("ScriptFields should not be null", scriptFields); + assertEquals("ScriptFields size should match", 2, scriptFields.size()); + + // Check script fields + boolean foundScriptField1 = false; + boolean foundScriptField2 = false; + for (SearchSourceBuilder.ScriptField scriptField : scriptFields) { + if (scriptField.fieldName().equals("script_field1")) { + assertTrue("IgnoreFailure should be true for script_field1", scriptField.ignoreFailure()); + foundScriptField1 = true; + } else if (scriptField.fieldName().equals("script_field2")) { + assertFalse("IgnoreFailure should be false for script_field2", scriptField.ignoreFailure()); + foundScriptField2 = true; + } + } + assertTrue("Should find script_field1", foundScriptField1); + assertTrue("Should find script_field2", foundScriptField2); + } + + public void testFromProtoWithSource() throws IOException { + // Create a protobuf InnerHits with source context + SourceConfig sourceContext = SourceConfig.newBuilder() + .setFilter(SourceFilter.newBuilder().addIncludes("include1").addIncludes("include2").addExcludes("exclude1").build()) + .build(); + + InnerHits innerHits = InnerHits.newBuilder().setName("test_inner_hits").setSource(sourceContext).build(); + + // Call the method under test + InnerHitBuilder innerHitBuilder = 
InnerHitsBuilderProtoUtils.fromProto(Collections.singletonList(innerHits)); + + // Verify the result + assertNotNull("InnerHitBuilder should not be null", innerHitBuilder); + org.opensearch.search.fetch.subphase.FetchSourceContext fetchSourceContext = innerHitBuilder.getFetchSourceContext(); + assertNotNull("FetchSourceContext should not be null", fetchSourceContext); + assertTrue("FetchSource should be true", fetchSourceContext.fetchSource()); + assertArrayEquals("Includes should match", new String[] { "include1", "include2" }, fetchSourceContext.includes()); + assertArrayEquals("Excludes should match", new String[] { "exclude1" }, fetchSourceContext.excludes()); + } + + public void testFromProtoWithMultipleInnerHits() throws IOException { + // Create multiple protobuf InnerHits + InnerHits innerHits1 = InnerHits.newBuilder().setName("inner_hits1").setSize(10).build(); + + InnerHits innerHits2 = InnerHits.newBuilder().setName("inner_hits2").setSize(20).build(); + + List innerHitsList = Arrays.asList(innerHits1, innerHits2); + + // Call the method under test + InnerHitBuilder innerHitBuilder = InnerHitsBuilderProtoUtils.fromProto(innerHitsList); + + // Verify the result + assertNotNull("InnerHitBuilder should not be null", innerHitBuilder); + // The last inner hits in the list should override previous ones + assertEquals("Name should match the last inner hits", "inner_hits2", innerHitBuilder.getName()); + assertEquals("Size should match the last inner hits", 20, innerHitBuilder.getSize()); + } + + public void testFromProtoWithEmptyList() throws IOException { + // Call the method under test with an empty list + InnerHitBuilder innerHitBuilder = InnerHitsBuilderProtoUtils.fromProto(Collections.emptyList()); + + // Verify the result + assertNotNull("InnerHitBuilder should not be null", innerHitBuilder); + // Should have default values + assertNull("Name should be null", innerHitBuilder.getName()); + assertEquals("From should be default", 0, innerHitBuilder.getFrom()); + assertEquals("Size should be default", 3, innerHitBuilder.getSize()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/OperatorProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/OperatorProtoUtilsTests.java new file mode 100644 index 0000000000000..af65f15239bee --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/OperatorProtoUtilsTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.index.query.Operator; +import org.opensearch.protobufs.SearchRequest; +import org.opensearch.test.OpenSearchTestCase; + +public class OperatorProtoUtilsTests extends OpenSearchTestCase { + + public void testFromEnumWithAnd() { + // Call the method under test with AND operator + Operator operator = OperatorProtoUtils.fromEnum(SearchRequest.Operator.OPERATOR_AND); + + // Verify the result + assertNotNull("Operator should not be null", operator); + assertEquals("Operator should be AND", Operator.AND, operator); + } + + public void testFromEnumWithOr() { + // Call the method under test with OR operator + Operator operator = OperatorProtoUtils.fromEnum(SearchRequest.Operator.OPERATOR_OR); + + // Verify the result + assertNotNull("Operator should not be null", operator); + assertEquals("Operator should be OR", Operator.OR, operator); + } + + public void testFromEnumWithUnrecognized() { + // Call the method under test with UNRECOGNIZED operator, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> OperatorProtoUtils.fromEnum(SearchRequest.Operator.UNRECOGNIZED) + ); + + assertTrue("Exception message should mention no operator found", exception.getMessage().contains("operator needs to be either")); + } + + public void testFromEnumWithNull() { + // Call the method under test with null, should throw NullPointerException + NullPointerException exception = expectThrows(NullPointerException.class, () -> OperatorProtoUtils.fromEnum(null)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/PointInTimeBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/PointInTimeBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..d8e0f96bf128c --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/PointInTimeBuilderProtoUtilsTests.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.protobufs.PointInTimeReference; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.concurrent.TimeUnit; + +public class PointInTimeBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithValidValues() { + // Create a protobuf PointInTimeReference with valid values + PointInTimeReference pointInTimeReference = PointInTimeReference.newBuilder().setId("test_pit_id").setKeepAlive("5m").build(); + + // Call the method under test + PointInTimeBuilder pointInTimeBuilder = PointInTimeBuilderProtoUtils.fromProto(pointInTimeReference); + + // Verify the result + assertNotNull("PointInTimeBuilder should not be null", pointInTimeBuilder); + assertEquals("ID should match", "test_pit_id", pointInTimeBuilder.getId()); + assertEquals("KeepAlive should match", TimeValue.timeValueMinutes(5), pointInTimeBuilder.getKeepAlive()); + } + + public void testFromProtoWithDifferentTimeFormats() { + // Test with seconds + PointInTimeReference pointInTimeReference1 = PointInTimeReference.newBuilder().setId("test_pit_id_1").setKeepAlive("30s").build(); + PointInTimeBuilder pointInTimeBuilder1 = PointInTimeBuilderProtoUtils.fromProto(pointInTimeReference1); + assertEquals("KeepAlive should match for seconds", TimeValue.timeValueSeconds(30), pointInTimeBuilder1.getKeepAlive()); + + // Test with hours + PointInTimeReference pointInTimeReference2 = PointInTimeReference.newBuilder().setId("test_pit_id_2").setKeepAlive("2h").build(); + PointInTimeBuilder pointInTimeBuilder2 = PointInTimeBuilderProtoUtils.fromProto(pointInTimeReference2); + assertEquals("KeepAlive should match for hours", TimeValue.timeValueHours(2), pointInTimeBuilder2.getKeepAlive()); + + // Test with days + PointInTimeReference pointInTimeReference3 = PointInTimeReference.newBuilder().setId("test_pit_id_3").setKeepAlive("1d").build(); + PointInTimeBuilder pointInTimeBuilder3 = PointInTimeBuilderProtoUtils.fromProto(pointInTimeReference3); + assertEquals("KeepAlive should match for days", TimeValue.timeValueDays(1), pointInTimeBuilder3.getKeepAlive()); + + // Test with milliseconds + PointInTimeReference pointInTimeReference4 = PointInTimeReference.newBuilder().setId("test_pit_id_4").setKeepAlive("500ms").build(); + PointInTimeBuilder pointInTimeBuilder4 = PointInTimeBuilderProtoUtils.fromProto(pointInTimeReference4); + assertEquals("KeepAlive should match for milliseconds", TimeValue.timeValueMillis(500), pointInTimeBuilder4.getKeepAlive()); + } + + public void testFromProtoWithComplexTimeFormat() { + // Test with complex time format + PointInTimeReference pointInTimeReference = PointInTimeReference.newBuilder().setId("test_pit_id").setKeepAlive("30m").build(); + PointInTimeBuilder pointInTimeBuilder = PointInTimeBuilderProtoUtils.fromProto(pointInTimeReference); + + // Calculate expected time value: 30m = 1800s + TimeValue expectedTimeValue = new TimeValue(1800, TimeUnit.SECONDS); + assertEquals("KeepAlive should match for complex format", expectedTimeValue, pointInTimeBuilder.getKeepAlive()); + } + + public void testFromProtoWithInvalidTimeFormat() { + // Test with invalid time format + PointInTimeReference pointInTimeReference = PointInTimeReference.newBuilder() + .setId("test_pit_id") + .setKeepAlive("invalid_time_format") + .build(); + + // Call the method under test, should throw IllegalArgumentException 
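+        // The keep_alive strings above ("5m", "30s", "2h", ...) are parsed into TimeValue instances, so an unrecognised format is expected to be rejected here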
+ IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> PointInTimeBuilderProtoUtils.fromProto(pointInTimeReference) + ); + + assertTrue("Exception message should mention failed to parse", exception.getMessage().contains("failed to parse")); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/ScriptFieldProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/ScriptFieldProtoUtilsTests.java new file mode 100644 index 0000000000000..b256ce4965a3c --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/ScriptFieldProtoUtilsTests.java @@ -0,0 +1,213 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.protobufs.InlineScript; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.protobufs.ScriptField; +import org.opensearch.protobufs.ScriptLanguage; +import org.opensearch.protobufs.ScriptLanguage.BuiltinScriptLanguage; +import org.opensearch.protobufs.StoredScriptId; +import org.opensearch.script.Script; +import org.opensearch.script.ScriptType; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class ScriptFieldProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithInlineScript() throws IOException { + // Create a protobuf ScriptField with inline script + InlineScript inlineScript = InlineScript.newBuilder() + .setSource("doc['field'].value * 2") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_PAINLESS).build()) + .build(); + + org.opensearch.protobufs.Script script = org.opensearch.protobufs.Script.newBuilder().setInlineScript(inlineScript).build(); + + ScriptField scriptField = ScriptField.newBuilder().setScript(script).setIgnoreFailure(true).build(); + + // Call the method under test + SearchSourceBuilder.ScriptField result = SearchSourceBuilderProtoUtils.ScriptFieldProtoUtils.fromProto( + "test_script_field", + scriptField + ); + + // Verify the result + assertNotNull("ScriptField should not be null", result); + assertEquals("Field name should match", "test_script_field", result.fieldName()); + assertTrue("IgnoreFailure should be true", result.ignoreFailure()); + + Script resultScript = result.script(); + assertNotNull("Script should not be null", resultScript); + assertEquals("Script type should be INLINE", ScriptType.INLINE, resultScript.getType()); + assertEquals("Script language should be painless", "painless", resultScript.getLang()); + assertEquals("Script source should match", "doc['field'].value * 2", resultScript.getIdOrCode()); + assertEquals("Script params should be empty", Collections.emptyMap(), resultScript.getParams()); + } + + public void testFromProtoWithStoredScript() throws IOException { + // Create a protobuf ScriptField with stored script + StoredScriptId storedScriptId = StoredScriptId.newBuilder().setId("my_stored_script").build(); + + org.opensearch.protobufs.Script script = 
org.opensearch.protobufs.Script.newBuilder().setStoredScriptId(storedScriptId).build(); + + ScriptField scriptField = ScriptField.newBuilder().setScript(script).build(); + + // Call the method under test + SearchSourceBuilder.ScriptField result = SearchSourceBuilderProtoUtils.ScriptFieldProtoUtils.fromProto( + "test_stored_script", + scriptField + ); + + // Verify the result + assertNotNull("ScriptField should not be null", result); + assertEquals("Field name should match", "test_stored_script", result.fieldName()); + assertFalse("IgnoreFailure should be false by default", result.ignoreFailure()); + + Script resultScript = result.script(); + assertNotNull("Script should not be null", resultScript); + assertEquals("Script type should be STORED", ScriptType.STORED, resultScript.getType()); + assertEquals("Script id should match", "my_stored_script", resultScript.getIdOrCode()); + assertEquals("Script params should be empty", Collections.emptyMap(), resultScript.getParams()); + } + + public void testFromProtoWithScriptParams() throws IOException { + // Create a map of script parameters + Map paramsMap = new HashMap<>(); + paramsMap.put("factor", "2"); + paramsMap.put("field", "price"); + + // Create ObjectMap for script parameters + ObjectMap.Builder objectMapBuilder = ObjectMap.newBuilder(); + for (Map.Entry entry : paramsMap.entrySet()) { + objectMapBuilder.putFields(entry.getKey(), ObjectMap.Value.newBuilder().setString(entry.getValue()).build()); + } + + // Create a protobuf ScriptField with inline script and parameters + InlineScript inlineScript = InlineScript.newBuilder() + .setSource("doc[params.field].value * params.factor") + .setLang(ScriptLanguage.newBuilder().setStringValue("painless").build()) + .setParams(objectMapBuilder.build()) + .build(); + + org.opensearch.protobufs.Script script = org.opensearch.protobufs.Script.newBuilder().setInlineScript(inlineScript).build(); + + ScriptField scriptField = ScriptField.newBuilder().setScript(script).build(); + + // Call the method under test + SearchSourceBuilder.ScriptField result = SearchSourceBuilderProtoUtils.ScriptFieldProtoUtils.fromProto( + "test_script_with_params", + scriptField + ); + + // Verify the result + assertNotNull("ScriptField should not be null", result); + assertEquals("Field name should match", "test_script_with_params", result.fieldName()); + + Script resultScript = result.script(); + assertNotNull("Script should not be null", resultScript); + assertEquals("Script type should be INLINE", ScriptType.INLINE, resultScript.getType()); + assertEquals("Script language should be painless", "painless", resultScript.getLang()); + assertEquals("Script source should match", "doc[params.field].value * params.factor", resultScript.getIdOrCode()); + + Map expectedParams = new HashMap<>(); + expectedParams.put("factor", "2"); + expectedParams.put("field", "price"); + assertEquals("Script params should match", expectedParams, resultScript.getParams()); + } + + public void testFromProtoWithCustomLanguage() throws IOException { + // Create a protobuf ScriptField with custom language + InlineScript inlineScript = InlineScript.newBuilder() + .setSource("custom script code") + .setLang(ScriptLanguage.newBuilder().setStringValue("mylang").build()) + .build(); + + org.opensearch.protobufs.Script script = org.opensearch.protobufs.Script.newBuilder().setInlineScript(inlineScript).build(); + + ScriptField scriptField = ScriptField.newBuilder().setScript(script).build(); + + // Call the method under test + SearchSourceBuilder.ScriptField 
result = SearchSourceBuilderProtoUtils.ScriptFieldProtoUtils.fromProto( + "test_custom_lang", + scriptField + ); + + // Verify the result + assertNotNull("ScriptField should not be null", result); + + Script resultScript = result.script(); + assertNotNull("Script should not be null", resultScript); + assertEquals("Script language should match custom language", "mylang", resultScript.getLang()); + } + + public void testFromProtoWithScriptOptions() throws IOException { + // Create a map of script options + Map optionsMap = new HashMap<>(); + optionsMap.put("content_type", "application/json"); + + // Create a protobuf ScriptField with inline script and options + InlineScript inlineScript = InlineScript.newBuilder() + .setSource("doc['field'].value") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_PAINLESS).build()) + .putAllOptions(optionsMap) + .build(); + + org.opensearch.protobufs.Script script = org.opensearch.protobufs.Script.newBuilder().setInlineScript(inlineScript).build(); + + ScriptField scriptField = ScriptField.newBuilder().setScript(script).build(); + + // Call the method under test + SearchSourceBuilder.ScriptField result = SearchSourceBuilderProtoUtils.ScriptFieldProtoUtils.fromProto( + "test_script_options", + scriptField + ); + + // Verify the result + assertNotNull("ScriptField should not be null", result); + + Script resultScript = result.script(); + assertNotNull("Script should not be null", resultScript); + assertEquals("Script options should match", optionsMap, resultScript.getOptions()); + } + + public void testFromProtoWithInvalidScriptOptions() throws IOException { + // Create a map of invalid script options (more than just content_type) + Map optionsMap = new HashMap<>(); + optionsMap.put("content_type", "application/json"); + optionsMap.put("invalid_option", "value"); + + // Create a protobuf ScriptField with inline script and invalid options + InlineScript inlineScript = InlineScript.newBuilder() + .setSource("doc['field'].value") + .setLang(ScriptLanguage.newBuilder().setBuiltinScriptLanguage(BuiltinScriptLanguage.BUILTIN_SCRIPT_LANGUAGE_PAINLESS).build()) + .putAllOptions(optionsMap) + .build(); + + org.opensearch.protobufs.Script script = org.opensearch.protobufs.Script.newBuilder().setInlineScript(inlineScript).build(); + + ScriptField scriptField = ScriptField.newBuilder().setScript(script).build(); + + // Call the method under test, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> SearchSourceBuilderProtoUtils.ScriptFieldProtoUtils.fromProto("test_invalid_options", scriptField) + ); + + assertTrue( + "Exception message should mention illegal compiler options", + exception.getMessage().contains("illegal compiler options") + ); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchAfterBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchAfterBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..7aaf99098154a --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchAfterBuilderProtoUtilsTests.java @@ -0,0 +1,162 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible 
open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.protobufs.FieldValue; +import org.opensearch.protobufs.GeneralNumber; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class SearchAfterBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithEmptyList() throws IOException { + // Call the method under test with an empty list + Object[] values = SearchAfterBuilderProtoUtils.fromProto(Collections.emptyList()); + + // Verify the result + assertNotNull("Values array should not be null", values); + assertEquals("Values array should be empty", 0, values.length); + } + + public void testFromProtoWithStringValue() throws IOException { + // Create a list with a string value + List fieldValues = Collections.singletonList(FieldValue.newBuilder().setStringValue("test_string").build()); + + // Call the method under test + Object[] values = SearchAfterBuilderProtoUtils.fromProto(fieldValues); + + // Verify the result + assertNotNull("Values array should not be null", values); + assertEquals("Values array should have 1 element", 1, values.length); + assertEquals("Value should be a string", "test_string", values[0]); + } + + public void testFromProtoWithBooleanValue() throws IOException { + // Create a list with a boolean value + List fieldValues = Collections.singletonList(FieldValue.newBuilder().setBoolValue(true).build()); + + // Call the method under test + Object[] values = SearchAfterBuilderProtoUtils.fromProto(fieldValues); + + // Verify the result + assertNotNull("Values array should not be null", values); + assertEquals("Values array should have 1 element", 1, values.length); + assertEquals("Value should be a boolean", true, values[0]); + } + + public void testFromProtoWithInt32Value() throws IOException { + // Create a list with an int32 value + List fieldValues = Collections.singletonList( + FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setInt32Value(42).build()).build() + ); + + // Call the method under test + Object[] values = SearchAfterBuilderProtoUtils.fromProto(fieldValues); + + // Verify the result + assertNotNull("Values array should not be null", values); + assertEquals("Values array should have 1 element", 1, values.length); + assertEquals("Value should be an integer", 42, values[0]); + } + + public void testFromProtoWithInt64Value() throws IOException { + // Create a list with an int64 value + List fieldValues = Collections.singletonList( + FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setInt64Value(9223372036854775807L).build()).build() + ); + + // Call the method under test + Object[] values = SearchAfterBuilderProtoUtils.fromProto(fieldValues); + + // Verify the result + assertNotNull("Values array should not be null", values); + assertEquals("Values array should have 1 element", 1, values.length); + assertEquals("Value should be a long", 9223372036854775807L, values[0]); + } + + public void testFromProtoWithDoubleValue() throws IOException { + // Create a list with a double value + List fieldValues = Collections.singletonList( + FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setDoubleValue(3.14159).build()).build() + ); + + // Call the method under test + Object[] values = SearchAfterBuilderProtoUtils.fromProto(fieldValues); + + // Verify the result + assertNotNull("Values array should not be 
null", values); + assertEquals("Values array should have 1 element", 1, values.length); + assertEquals("Value should be a double", 3.14159, values[0]); + } + + public void testFromProtoWithFloatValue() throws IOException { + // Create a list with a float value + List fieldValues = Collections.singletonList( + FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setFloatValue(2.71828f).build()).build() + ); + + // Call the method under test + Object[] values = SearchAfterBuilderProtoUtils.fromProto(fieldValues); + + // Verify the result + assertNotNull("Values array should not be null", values); + assertEquals("Values array should have 1 element", 1, values.length); + assertEquals("Value should be a float", 2.71828f, values[0]); + } + + public void testFromProtoWithMultipleValues() throws IOException { + // Create a list with multiple values of different types + List fieldValues = new ArrayList<>(); + fieldValues.add(FieldValue.newBuilder().setStringValue("test_string").build()); + fieldValues.add(FieldValue.newBuilder().setBoolValue(true).build()); + fieldValues.add(FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setInt32Value(42).build()).build()); + fieldValues.add(FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setDoubleValue(3.14159).build()).build()); + + // Call the method under test + Object[] values = SearchAfterBuilderProtoUtils.fromProto(fieldValues); + + // Verify the result + assertNotNull("Values array should not be null", values); + assertEquals("Values array should have 4 elements", 4, values.length); + assertEquals("First value should be a string", "test_string", values[0]); + assertEquals("Second value should be a boolean", true, values[1]); + assertEquals("Third value should be an integer", 42, values[2]); + assertEquals("Fourth value should be a double", 3.14159, values[3]); + } + + public void testFromProtoWithEmptyFieldValue() throws IOException { + // Create a list with an empty field value (no value set) + List fieldValues = Collections.singletonList(FieldValue.newBuilder().build()); + + // Call the method under test + Object[] values = SearchAfterBuilderProtoUtils.fromProto(fieldValues); + + // Verify the result + assertNotNull("Values array should not be null", values); + assertEquals("Values array should be empty", 0, values.length); + } + + public void testFromProtoWithEmptyGeneralNumber() throws IOException { + // Create a list with a field value containing an empty general number (no value set) + List fieldValues = Collections.singletonList( + FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().build()).build() + ); + + // Call the method under test + Object[] values = SearchAfterBuilderProtoUtils.fromProto(fieldValues); + + // Verify the result + assertNotNull("Values array should not be null", values); + assertEquals("Values array should be empty", 0, values.length); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchRequestProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchRequestProtoUtilsTests.java new file mode 100644 index 0000000000000..87b46c20fa391 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchRequestProtoUtilsTests.java @@ -0,0 +1,424 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be 
licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchType; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.protobufs.SearchRequestBody; +import org.opensearch.protobufs.SourceConfigParam; +import org.opensearch.protobufs.TrackHits; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.fetch.StoredFieldsContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.suggest.SuggestBuilder; +import org.opensearch.search.suggest.term.TermSuggestionBuilder; +import org.opensearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.client.Client; + +import java.io.IOException; + +import static org.opensearch.search.internal.SearchContext.TRACK_TOTAL_HITS_DISABLED; +import static org.mockito.Mockito.mock; + +public class SearchRequestProtoUtilsTests extends OpenSearchTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + private Client mockClient; + + @Override + public void setUp() throws Exception { + super.setUp(); + namedWriteableRegistry = mock(NamedWriteableRegistry.class); + mockClient = mock(Client.class); + } + + public void testParseSearchRequestWithBasicFields() throws IOException { + // Create a protobuf SearchRequest with basic fields + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .addIndex("index1") + .addIndex("index2") + .setSearchType(org.opensearch.protobufs.SearchRequest.SearchType.SEARCH_TYPE_QUERY_THEN_FETCH) + .setBatchedReduceSize(10) + .setPreFilterShardSize(5) + .setMaxConcurrentShardRequests(20) + .setAllowPartialSearchResults(true) + .setPhaseTook(true) + .setRequestCache(true) + .setScroll("1m") + .addRouting("routing1") + .addRouting("routing2") + .setPreference("_local") + .setSearchPipeline("pipeline1") + .setCcsMinimizeRoundtrips(true) + .setCancelAfterTimeInterval("30s") + .build(); + + // Create a SearchRequest to populate + SearchRequest searchRequest = new SearchRequest(); + + // Call the method under test + SearchRequestProtoUtils.parseSearchRequest(searchRequest, protoRequest, namedWriteableRegistry, size -> {}); + + // Verify the result + assertNotNull("SearchRequest should not be null", searchRequest); + assertArrayEquals("Indices should match", new String[] { "index1", "index2" }, searchRequest.indices()); + assertEquals("SearchType should match", SearchType.QUERY_THEN_FETCH, searchRequest.searchType()); + assertEquals("BatchedReduceSize should match", 10, searchRequest.getBatchedReduceSize()); + assertEquals("PreFilterShardSize should match", 5, searchRequest.getPreFilterShardSize().intValue()); + assertEquals("MaxConcurrentShardRequests should match", 20, searchRequest.getMaxConcurrentShardRequests()); + assertTrue("AllowPartialSearchResults should be true", searchRequest.allowPartialSearchResults()); + assertTrue("PhaseTook should be true", searchRequest.isPhaseTook()); + assertTrue("RequestCache should be true", searchRequest.requestCache()); + assertNotNull("Scroll should not be null", searchRequest.scroll()); + assertEquals("Scroll timeout should match", TimeValue.timeValueMinutes(1), 
searchRequest.scroll().keepAlive()); + assertArrayEquals( + "Routing should match", + new String[] { "routing1", "routing2" }, + Strings.commaDelimitedListToStringArray(searchRequest.routing()) + ); + assertEquals("Preference should match", "_local", searchRequest.preference()); + assertEquals("SearchPipeline should match", "pipeline1", searchRequest.pipeline()); + assertTrue("CcsMinimizeRoundtrips should be true", searchRequest.isCcsMinimizeRoundtrips()); + assertEquals("CancelAfterTimeInterval should match", TimeValue.timeValueSeconds(30), searchRequest.getCancelAfterTimeInterval()); + } + + public void testParseSearchRequestWithRequestBody() throws IOException { + // Create a protobuf SearchRequestBody + SearchRequestBody requestBody = SearchRequestBody.newBuilder() + .setFrom(10) + .setSize(20) + .setTimeout("5s") + .setTerminateAfter(100) + .setExplain(true) + .setVersion(true) + .setSeqNoPrimaryTerm(true) + .setTrackScores(true) + .setProfile(true) + .build(); + + // Create a protobuf SearchRequest with the request body + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .setRequestBody(requestBody) + .build(); + + // Create a SearchRequest to populate + SearchRequest searchRequest = new SearchRequest(); + + // Call the method under test + SearchRequestProtoUtils.parseSearchRequest(searchRequest, protoRequest, namedWriteableRegistry, size -> {}); + + // Verify the result + assertNotNull("SearchRequest should not be null", searchRequest); + assertNotNull("Source should not be null", searchRequest.source()); + assertEquals("From should match", 10, searchRequest.source().from()); + assertEquals("Size should match", 20, searchRequest.source().size()); + assertEquals("Timeout should match", TimeValue.timeValueSeconds(5), searchRequest.source().timeout()); + assertEquals("TerminateAfter should match", 100, searchRequest.source().terminateAfter()); + assertTrue("Explain should be true", searchRequest.source().explain()); + assertTrue("Version should be true", searchRequest.source().version()); + assertTrue("SeqNoAndPrimaryTerm should be true", searchRequest.source().seqNoAndPrimaryTerm()); + assertTrue("TrackScores should be true", searchRequest.source().trackScores()); + assertTrue("Profile should be true", searchRequest.source().profile()); + } + + public void testParseSearchSourceWithQueryAndSort() throws IOException { + // Create a protobuf SearchRequest with query and sort + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .setQ("field:value") + .addSort( + org.opensearch.protobufs.SearchRequest.SortOrder.newBuilder() + .setField("field1") + .setDirection(org.opensearch.protobufs.SearchRequest.SortOrder.Direction.DIRECTION_ASC) + .build() + ) + .addSort( + org.opensearch.protobufs.SearchRequest.SortOrder.newBuilder() + .setField("field2") + .setDirection(org.opensearch.protobufs.SearchRequest.SortOrder.Direction.DIRECTION_DESC) + .build() + ) + .addSort(org.opensearch.protobufs.SearchRequest.SortOrder.newBuilder().setField("field3").build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchRequestProtoUtils.parseSearchSource(searchSourceBuilder, protoRequest, size -> {}); + + // Verify the result + assertNotNull("SearchSourceBuilder should not be null", searchSourceBuilder); + assertNotNull("Query should not be null", searchSourceBuilder.query()); + 
assertNotNull("Sorts should not be null", searchSourceBuilder.sorts()); + assertEquals("Should have 3 sorts", 3, searchSourceBuilder.sorts().size()); + } + + public void testParseSearchSourceWithStoredFields() throws IOException { + // Create a protobuf SearchRequest with stored fields + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .addStoredFields("field1") + .addStoredFields("field2") + .addStoredFields("field3") + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchRequestProtoUtils.parseSearchSource(searchSourceBuilder, protoRequest, size -> {}); + + // Verify the result + assertNotNull("SearchSourceBuilder should not be null", searchSourceBuilder); + StoredFieldsContext storedFieldsContext = searchSourceBuilder.storedFields(); + assertNotNull("StoredFieldsContext should not be null", storedFieldsContext); + assertEquals("Should have 3 stored fields", 3, storedFieldsContext.fieldNames().size()); + assertTrue("Should contain field1", storedFieldsContext.fieldNames().contains("field1")); + assertTrue("Should contain field2", storedFieldsContext.fieldNames().contains("field2")); + assertTrue("Should contain field3", storedFieldsContext.fieldNames().contains("field3")); + } + + public void testParseSearchSourceWithDocValueFields() throws IOException { + // Create a protobuf SearchRequest with doc value fields + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .addDocvalueFields("field1") + .addDocvalueFields("field2") + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchRequestProtoUtils.parseSearchSource(searchSourceBuilder, protoRequest, size -> {}); + + // Verify the result + assertNotNull("SearchSourceBuilder should not be null", searchSourceBuilder); + assertNotNull("DocValueFields should not be null", searchSourceBuilder.docValueFields()); + assertEquals("Should have 2 doc value fields", 2, searchSourceBuilder.docValueFields().size()); + } + + public void testParseSearchSourceWithSource() throws IOException { + // Create a protobuf SearchRequest with source context + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .setSource(SourceConfigParam.newBuilder().setBoolValue(true).build()) + .addSourceIncludes("include1") + .addSourceIncludes("include2") + .addSourceExcludes("exclude1") + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchRequestProtoUtils.parseSearchSource(searchSourceBuilder, protoRequest, size -> {}); + + // Verify the result + assertNotNull("SearchSourceBuilder should not be null", searchSourceBuilder); + org.opensearch.search.fetch.subphase.FetchSourceContext fetchSourceContext = searchSourceBuilder.fetchSource(); + assertNotNull("FetchSourceContext should not be null", fetchSourceContext); + assertTrue("FetchSource should be true", fetchSourceContext.fetchSource()); + assertArrayEquals("Includes should match", new String[] { "include1", "include2" }, fetchSourceContext.includes()); + assertArrayEquals("Excludes should match", new String[] { "exclude1" }, fetchSourceContext.excludes()); + } + + public void 
testParseSearchSourceWithTrackTotalHitsBoolean() throws IOException { + // Create a protobuf SearchRequest with track total hits boolean + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .setTrackTotalHits(TrackHits.newBuilder().setBoolValue(true).build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchRequestProtoUtils.parseSearchSource(searchSourceBuilder, protoRequest, size -> {}); + + // Verify the result + assertNotNull("SearchSourceBuilder should not be null", searchSourceBuilder); + assertTrue("TrackTotalHits should be true", searchSourceBuilder.trackTotalHitsUpTo() == SearchContext.TRACK_TOTAL_HITS_ACCURATE); + } + + public void testParseSearchSourceWithTrackTotalHitsInteger() throws IOException { + // Create a protobuf SearchRequest with track total hits integer + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .setTrackTotalHits(TrackHits.newBuilder().setInt32Value(1000).build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchRequestProtoUtils.parseSearchSource(searchSourceBuilder, protoRequest, size -> {}); + + // Verify the result + assertNotNull("SearchSourceBuilder should not be null", searchSourceBuilder); + assertEquals("TrackTotalHitsUpTo should match", 1000, searchSourceBuilder.trackTotalHitsUpTo().intValue()); + } + + public void testParseSearchSourceWithStats() throws IOException { + // Create a protobuf SearchRequest with stats + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .addStats("stat1") + .addStats("stat2") + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchRequestProtoUtils.parseSearchSource(searchSourceBuilder, protoRequest, size -> {}); + + // Verify the result + assertNotNull("SearchSourceBuilder should not be null", searchSourceBuilder); + assertNotNull("Stats should not be null", searchSourceBuilder.stats()); + assertEquals("Should have 2 stats", 2, searchSourceBuilder.stats().size()); + assertTrue("Should contain stat1", searchSourceBuilder.stats().contains("stat1")); + assertTrue("Should contain stat2", searchSourceBuilder.stats().contains("stat2")); + } + + public void testParseSearchSourceWithSuggest() throws IOException { + // Create a protobuf SearchRequest with suggest + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .setSuggestField("title") + .setSuggestText("opensearch") + .setSuggestSize(10) + .setSuggestMode(org.opensearch.protobufs.SearchRequest.SuggestMode.SUGGEST_MODE_POPULAR) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchRequestProtoUtils.parseSearchSource(searchSourceBuilder, protoRequest, size -> {}); + + // Verify the result + assertNotNull("SearchSourceBuilder should not be null", searchSourceBuilder); + SuggestBuilder suggestBuilder = searchSourceBuilder.suggest(); + assertNotNull("SuggestBuilder should not be null", suggestBuilder); + assertEquals("Should have 1 suggestion", 1, 
suggestBuilder.getSuggestions().size()); + assertTrue("Should contain title suggestion", suggestBuilder.getSuggestions().containsKey("title")); + assertEquals("SuggestText should match", "opensearch", suggestBuilder.getSuggestions().get("title").text()); + assertEquals("SuggestSize should match", 10, suggestBuilder.getSuggestions().get("title").size().intValue()); + assertEquals( + "SuggestMode should match", + SuggestMode.POPULAR, + ((TermSuggestionBuilder) (suggestBuilder.getSuggestions().get("title"))).suggestMode() + ); + } + + public void testCheckProtoTotalHitsWithRestTotalHitsAsInt() throws IOException { + // Create a protobuf SearchRequest with rest_total_hits_as_int + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .setRestTotalHitsAsInt(true) + .build(); + + // Create a SearchRequest to populate + SearchRequest searchRequest = new SearchRequest(); + + // Call the method under test + SearchRequestProtoUtils.checkProtoTotalHits(protoRequest, searchRequest); + + // Verify the result + assertNotNull("SearchRequest should not be null", searchRequest); + assertNotNull("Source should not be null", searchRequest.source()); + assertTrue("TrackTotalHits should be true", searchRequest.source().trackTotalHitsUpTo().intValue() > TRACK_TOTAL_HITS_DISABLED); + } + + public void testCheckProtoTotalHitsWithTrackTotalHitsUpTo() throws IOException { + // Create a protobuf SearchRequest with rest_total_hits_as_int and track_total_hits_up_to + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .setRestTotalHitsAsInt(true) + .build(); + + // Create a SearchRequest with track_total_hits_up_to + SearchRequest searchRequest = new SearchRequest(); + searchRequest.source(new SearchSourceBuilder().trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_ACCURATE)); + + // Call the method under test + SearchRequestProtoUtils.checkProtoTotalHits(protoRequest, searchRequest); + + // Verify the result + assertNotNull("SearchRequest should not be null", searchRequest); + assertNotNull("Source should not be null", searchRequest.source()); + assertEquals( + "TrackTotalHitsUpTo should be ACCURATE", + SearchContext.TRACK_TOTAL_HITS_ACCURATE, + searchRequest.source().trackTotalHitsUpTo().intValue() + ); + } + + public void testCheckProtoTotalHitsWithInvalidTrackTotalHitsUpTo() throws IOException { + // Create a protobuf SearchRequest with rest_total_hits_as_int + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .setRestTotalHitsAsInt(true) + .build(); + + // Create a SearchRequest with invalid track_total_hits_up_to + SearchRequest searchRequest = new SearchRequest(); + searchRequest.source(new SearchSourceBuilder().trackTotalHitsUpTo(1000)); + + // Call the method under test, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> SearchRequestProtoUtils.checkProtoTotalHits(protoRequest, searchRequest) + ); + + assertTrue("Exception message should mention rest_total_hits_as_int", exception.getMessage().contains("rest_total_hits_as_int")); + } + + public void testParseSearchSourceWithInvalidTerminateAfter() throws IOException { + // Create a protobuf SearchRequest with invalid terminateAfter + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .setTerminateAfter(-1) + .build(); + + // Create a SearchSourceBuilder to 
populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> SearchRequestProtoUtils.parseSearchSource(searchSourceBuilder, protoRequest, size -> {}) + ); + + assertTrue( + "Exception message should mention terminateAfter must be > 0", + exception.getMessage().contains("terminateAfter must be > 0") + ); + } + + public void testParseSearchSourceWithInvalidSortDirection() throws IOException { + // Create a protobuf SearchRequest with invalid sort direction + org.opensearch.protobufs.SearchRequest protoRequest = org.opensearch.protobufs.SearchRequest.newBuilder() + .addSort( + org.opensearch.protobufs.SearchRequest.SortOrder.newBuilder() + .setField("field1") + .setDirection(org.opensearch.protobufs.SearchRequest.SortOrder.Direction.DIRECTION_UNSPECIFIED) + .build() + ) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> SearchRequestProtoUtils.parseSearchSource(searchSourceBuilder, protoRequest, size -> {}) + ); + + assertTrue( + "Exception message should mention unsupported sort direction", + exception.getMessage().contains("Unsupported sort direction") + ); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchSourceBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchSourceBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..93be2a2442a52 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SearchSourceBuilderProtoUtilsTests.java @@ -0,0 +1,607 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.protobufs.DerivedField; +import org.opensearch.protobufs.FieldAndFormat; +import org.opensearch.protobufs.FieldValue; +import org.opensearch.protobufs.GeneralNumber; +import org.opensearch.protobufs.InlineScript; +import org.opensearch.protobufs.MatchAllQuery; +import org.opensearch.protobufs.NumberMap; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.protobufs.QueryContainer; +import org.opensearch.protobufs.Script; +import org.opensearch.protobufs.ScriptField; +import org.opensearch.protobufs.SearchRequestBody; +import org.opensearch.protobufs.SlicedScroll; +import org.opensearch.protobufs.SortCombinations; +import org.opensearch.protobufs.TrackHits; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.search.internal.SearchContext.TRACK_TOTAL_HITS_ACCURATE; +import static org.opensearch.search.internal.SearchContext.TRACK_TOTAL_HITS_DISABLED; +import static org.mockito.Mockito.mock; + +public class SearchSourceBuilderProtoUtilsTests extends OpenSearchTestCase { + + private NamedWriteableRegistry mockRegistry; + + @Override + public void setUp() throws Exception { + super.setUp(); + mockRegistry = mock(NamedWriteableRegistry.class); + } + + public void testParseProtoWithFrom() throws IOException { + // Create a protobuf SearchRequestBody with from + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setFrom(10).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertEquals("From should match", 10, searchSourceBuilder.from()); + } + + public void testParseProtoWithSize() throws IOException { + // Create a protobuf SearchRequestBody with size + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setSize(20).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertEquals("Size should match", 20, searchSourceBuilder.size()); + } + + public void testParseProtoWithTimeout() throws IOException { + // Create a protobuf SearchRequestBody with timeout + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setTimeout("5s").build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertEquals("Timeout should match", TimeValue.timeValueSeconds(5), searchSourceBuilder.timeout()); + } + + public void testParseProtoWithTerminateAfter() throws IOException { + // Create a protobuf SearchRequestBody with terminateAfter + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setTerminateAfter(100).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder 
searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertEquals("TerminateAfter should match", 100, searchSourceBuilder.terminateAfter()); + } + + public void testParseProtoWithMinScore() throws IOException { + // Create a protobuf SearchRequestBody with minScore + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setMinScore(0.5f).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertEquals("MinScore should match", 0.5f, searchSourceBuilder.minScore(), 0.0f); + } + + public void testParseProtoWithVersion() throws IOException { + // Create a protobuf SearchRequestBody with version + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setVersion(true).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertTrue("Version should be true", searchSourceBuilder.version()); + } + + public void testParseProtoWithSeqNoPrimaryTerm() throws IOException { + // Create a protobuf SearchRequestBody with seqNoPrimaryTerm + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setSeqNoPrimaryTerm(true).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertTrue("SeqNoPrimaryTerm should be true", searchSourceBuilder.seqNoAndPrimaryTerm()); + } + + public void testParseProtoWithExplain() throws IOException { + // Create a protobuf SearchRequestBody with explain + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setExplain(true).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertTrue("Explain should be true", searchSourceBuilder.explain()); + } + + public void testParseProtoWithTrackScores() throws IOException { + // Create a protobuf SearchRequestBody with trackScores + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setTrackScores(true).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertTrue("TrackScores should be true", searchSourceBuilder.trackScores()); + } + + public void testParseProtoWithIncludeNamedQueriesScore() throws IOException { + // Create a protobuf SearchRequestBody with includeNamedQueriesScore + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setIncludeNamedQueriesScore(true).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + 
SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + searchSourceBuilder.includeNamedQueriesScores(true); + assertTrue("IncludeNamedQueriesScore should be true", searchSourceBuilder.includeNamedQueriesScore()); + } + + public void testParseProtoWithTrackTotalHitsBooleanTrue() throws IOException { + // Create a protobuf SearchRequestBody with trackTotalHits boolean true + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .setTrackTotalHits(TrackHits.newBuilder().setBoolValue(true).build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertEquals("TrackTotalHits should be accurate", TRACK_TOTAL_HITS_ACCURATE, searchSourceBuilder.trackTotalHitsUpTo().intValue()); + } + + public void testParseProtoWithTrackTotalHitsBooleanFalse() throws IOException { + // Create a protobuf SearchRequestBody with trackTotalHits boolean false + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .setTrackTotalHits(TrackHits.newBuilder().setBoolValue(false).build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertEquals("TrackTotalHits should be disabled", TRACK_TOTAL_HITS_DISABLED, searchSourceBuilder.trackTotalHitsUpTo().intValue()); + } + + public void testParseProtoWithTrackTotalHitsInteger() throws IOException { + // Create a protobuf SearchRequestBody with trackTotalHits integer + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .setTrackTotalHits(TrackHits.newBuilder().setInt32Value(1000).build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertEquals("TrackTotalHits should match", 1000, searchSourceBuilder.trackTotalHitsUpTo().intValue()); + } + + public void testParseProtoWithProfile() throws IOException { + // Create a protobuf SearchRequestBody with profile + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setProfile(true).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertTrue("Profile should be true", searchSourceBuilder.profile()); + } + + public void testParseProtoWithSearchPipeline() throws IOException { + // Create a protobuf SearchRequestBody with searchPipeline + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setSearchPipeline("my-pipeline").build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertEquals("SearchPipeline should match", "my-pipeline", searchSourceBuilder.pipeline()); + } + + public void testParseProtoWithVerbosePipeline() throws IOException { + // 
Create a protobuf SearchRequestBody with verbosePipeline + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setVerbosePipeline(true).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertTrue("VerbosePipeline should be true", searchSourceBuilder.verbosePipeline()); + } + + public void testParseProtoWithQuery() throws IOException { + // Create a protobuf SearchRequestBody with query + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .setQuery(QueryContainer.newBuilder().setMatchAll(MatchAllQuery.newBuilder().build()).build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertNotNull("Query should not be null", searchSourceBuilder.query()); + assertTrue("Query should be MatchAllQueryBuilder", searchSourceBuilder.query() instanceof MatchAllQueryBuilder); + } + + public void testParseProtoWithStats() throws IOException { + // Create a protobuf SearchRequestBody with stats + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().addStats("stat1").addStats("stat2").build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertNotNull("Stats should not be null", searchSourceBuilder.stats()); + assertEquals("Should have 2 stats", 2, searchSourceBuilder.stats().size()); + assertTrue("Stats should contain stat1", searchSourceBuilder.stats().contains("stat1")); + assertTrue("Stats should contain stat2", searchSourceBuilder.stats().contains("stat2")); + } + + public void testParseProtoWithDocValueFields() throws IOException { + // Create a protobuf SearchRequestBody with docValueFields + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .addDocvalueFields(FieldAndFormat.newBuilder().setField("field1").setFormat("format1").build()) + .addDocvalueFields(FieldAndFormat.newBuilder().setField("field2").setFormat("format2").build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertNotNull("DocValueFields should not be null", searchSourceBuilder.docValueFields()); + assertEquals("Should have 2 docValueFields", 2, searchSourceBuilder.docValueFields().size()); + } + + public void testParseProtoWithFields() throws IOException { + // Create a protobuf SearchRequestBody with fields + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .addFields(FieldAndFormat.newBuilder().setField("field1").setFormat("format1").build()) + .addFields(FieldAndFormat.newBuilder().setField("field2").setFormat("format2").build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result 
+ assertNotNull("FetchFields should not be null", searchSourceBuilder.fetchFields()); + assertEquals("Should have 2 fetchFields", 2, searchSourceBuilder.fetchFields().size()); + } + + public void testParseProtoWithIndicesBoost() throws IOException { + // Create a protobuf SearchRequestBody with indicesBoost + Map boostMap = new HashMap<>(); + boostMap.put("index1", 1.0f); + boostMap.put("index2", 2.0f); + + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .addIndicesBoost(NumberMap.newBuilder().putAllNumberMap(boostMap).build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertNotNull("IndexBoosts should not be null", searchSourceBuilder.indexBoosts()); + assertEquals("Should have 2 indexBoosts", 2, searchSourceBuilder.indexBoosts().size()); + } + + public void testParseProtoWithSortString() throws IOException { + // Create a protobuf SearchRequestBody with sort string + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .addSort(SortCombinations.newBuilder().setStringValue("field1").build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertNotNull("Sorts should not be null", searchSourceBuilder.sorts()); + assertEquals("Should have 1 sort", 1, searchSourceBuilder.sorts().size()); + } + + public void testParseProtoWithPostFilter() throws IOException { + // Create a protobuf SearchRequestBody with postFilter + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .setPostFilter(QueryContainer.newBuilder().setMatchAll(MatchAllQuery.newBuilder().build()).build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertNotNull("PostFilter should not be null", searchSourceBuilder.postFilter()); + assertTrue("PostFilter should be MatchAllQueryBuilder", searchSourceBuilder.postFilter() instanceof MatchAllQueryBuilder); + } + + public void testParseProtoWithScriptFields() throws IOException { + // Create a protobuf SearchRequestBody with scriptFields + Map scriptFieldsMap = new HashMap<>(); + scriptFieldsMap.put( + "script_field_1", + ScriptField.newBuilder() + .setScript( + Script.newBuilder().setInlineScript(InlineScript.newBuilder().setSource("doc['field'].value * 2").build()).build() + ) + .setIgnoreFailure(true) + .build() + ); + scriptFieldsMap.put( + "script_field_2", + ScriptField.newBuilder() + .setScript( + Script.newBuilder().setInlineScript(InlineScript.newBuilder().setSource("doc['field'].value * 2").build()).build() + ) + .build() + ); + + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().putAllScriptFields(scriptFieldsMap).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertNotNull("ScriptFields should not be null", 
searchSourceBuilder.scriptFields()); + assertEquals("Should have 2 script fields", 2, searchSourceBuilder.scriptFields().size()); + assertTrue( + "Should contain script_field_1", + searchSourceBuilder.scriptFields() + .contains( + new SearchSourceBuilder.ScriptField("script_field_1", new org.opensearch.script.Script("doc['field'].value * 2"), true) + ) + ); + assertTrue( + "Should contain script_field_2", + searchSourceBuilder.scriptFields() + .contains( + new SearchSourceBuilder.ScriptField("script_field_2", new org.opensearch.script.Script("doc['field'].value * 2"), false) + ) + ); + } + + public void testParseProtoWithSlice() throws IOException { + // Create a protobuf SearchRequestBody with slice + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .setSlice(SlicedScroll.newBuilder().setId(5).setMax(10).setField("_id").build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertNotNull("Slice should not be null", searchSourceBuilder.slice()); + assertEquals("Slice id should match", 5, searchSourceBuilder.slice().getId()); + assertEquals("Slice max should match", 10, searchSourceBuilder.slice().getMax()); + assertEquals("Slice field should match", "_id", searchSourceBuilder.slice().getField()); + } + + public void testParseProtoWithDerivedFields() throws IOException { + // Create a protobuf SearchRequestBody with derived fields + Map derivedFieldsMap = new HashMap<>(); + derivedFieldsMap.put( + "derived_field_1", + DerivedField.newBuilder() + .setType("number") + .setScript( + Script.newBuilder().setInlineScript(InlineScript.newBuilder().setSource("doc['field'].value * 2").build()).build() + ) + .build() + ); + derivedFieldsMap.put( + "derived_field_2", + DerivedField.newBuilder() + .setType("string") + .setScript( + Script.newBuilder().setInlineScript(InlineScript.newBuilder().setSource("doc['field'].value * 2").build()).build() + ) + .build() + ); + + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().putAllDerived(derivedFieldsMap).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertNotNull("DerivedFields should not be null", searchSourceBuilder.getDerivedFields()); + assertEquals("Should have 2 derived fields", 2, searchSourceBuilder.getDerivedFields().size()); + assertTrue( + "Should contain derived_field_1", + searchSourceBuilder.getDerivedFields() + .contains( + new org.opensearch.index.mapper.DerivedField( + "derived_field_1", + "number", + new org.opensearch.script.Script("doc['field'].value * 2") + ) + ) + ); + assertTrue( + "Should contain derived_field_2", + searchSourceBuilder.getDerivedFields() + .contains( + new org.opensearch.index.mapper.DerivedField( + "derived_field_2", + "string", + new org.opensearch.script.Script("doc['field'].value * 2") + ) + ) + ); + } + + public void testParseProtoWithSearchAfter() throws IOException { + // Create a protobuf SearchRequestBody with searchAfter + SearchRequestBody protoRequest = SearchRequestBody.newBuilder() + .addSearchAfter(FieldValue.newBuilder().setStringValue("value1").build()) + 
.addSearchAfter(FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setInt64Value(42).build()).build()) + .build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test + SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest); + + // Verify the result + assertNotNull("SearchAfter should not be null", searchSourceBuilder.searchAfter()); + assertEquals("SearchAfter should have 2 values", 2, searchSourceBuilder.searchAfter().length); + assertEquals("First value should match", "value1", searchSourceBuilder.searchAfter()[0]); + assertEquals("Second value should match", 42L, searchSourceBuilder.searchAfter()[1]); + } + + public void testParseProtoWithExtThrowsUnsupportedOperationException() throws IOException { + // Create a protobuf SearchRequestBody with ext + SearchRequestBody protoRequest = SearchRequestBody.newBuilder().setExt(ObjectMap.newBuilder().build()).build(); + + // Create a SearchSourceBuilder to populate + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + + // Call the method under test, should throw UnsupportedOperationException + UnsupportedOperationException exception = expectThrows( + UnsupportedOperationException.class, + () -> SearchSourceBuilderProtoUtils.parseProto(searchSourceBuilder, protoRequest) + ); + + assertTrue("Exception message should mention ext param", exception.getMessage().contains("ext param is not supported yet")); + } + + public void testScriptFieldProtoUtilsFromProto() throws IOException { + // Create a protobuf ScriptField + ScriptField scriptFieldProto = ScriptField.newBuilder() + .setScript(Script.newBuilder().setInlineScript(InlineScript.newBuilder().setSource("doc['field'].value * 2").build()).build()) + .setIgnoreFailure(true) + .build(); + + // Call the method under test + SearchSourceBuilder.ScriptField scriptField = SearchSourceBuilderProtoUtils.ScriptFieldProtoUtils.fromProto( + "test_script_field", + scriptFieldProto + ); + + // Verify the result + assertNotNull("ScriptField should not be null", scriptField); + assertEquals("Field name should match", "test_script_field", scriptField.fieldName()); + assertNotNull("Script should not be null", scriptField.script()); + assertEquals("Script source should match", "doc['field'].value * 2", scriptField.script().getIdOrCode()); + assertTrue("IgnoreFailure should be true", scriptField.ignoreFailure()); + } + + public void testScriptFieldProtoUtilsFromProtoWithDefaultIgnoreFailure() throws IOException { + // Create a protobuf ScriptField without ignoreFailure + ScriptField scriptFieldProto = ScriptField.newBuilder() + .setScript(Script.newBuilder().setInlineScript(InlineScript.newBuilder().setSource("doc['field'].value * 2").build()).build()) + .build(); + + // Call the method under test + SearchSourceBuilder.ScriptField scriptField = SearchSourceBuilderProtoUtils.ScriptFieldProtoUtils.fromProto( + "test_script_field", + scriptFieldProto + ); + + // Verify the result + assertNotNull("ScriptField should not be null", scriptField); + assertEquals("Field name should match", "test_script_field", scriptField.fieldName()); + assertNotNull("Script should not be null", scriptField.script()); + assertEquals("Script source should match", "doc['field'].value * 2", scriptField.script().getIdOrCode()); + assertFalse("IgnoreFailure should be false by default", scriptField.ignoreFailure()); + } + +} diff --git 
a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SortBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SortBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..300b5aa4c992d --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/SortBuilderProtoUtilsTests.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.plugin.transport.grpc.proto.request.search.sort.SortBuilderProtoUtils; +import org.opensearch.search.sort.SortBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.List; + +public class SortBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithEmptyList() { + // Call the method under test + List<SortBuilder<?>> sortBuilders = SortBuilderProtoUtils.fromProto(Collections.emptyList()); + + // Verify the result + assertNotNull("SortBuilders list should not be null", sortBuilders); + assertTrue("SortBuilders list should be empty", sortBuilders.isEmpty()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/StoredFieldsContextProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/StoredFieldsContextProtoUtilsTests.java new file mode 100644 index 0000000000000..a2d7fe0d7eb57 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/StoredFieldsContextProtoUtilsTests.java @@ -0,0 +1,129 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search; + +import org.opensearch.protobufs.SearchRequest; +import org.opensearch.search.fetch.StoredFieldsContext; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class StoredFieldsContextProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithValidFieldList() throws IOException { + // Create a list of field names + List fieldNames = Arrays.asList("field1", "field2", "field3"); + + // Call the method under test + StoredFieldsContext storedFieldsContext = StoredFieldsContextProtoUtils.fromProto(fieldNames); + + // Verify the result + assertNotNull("StoredFieldsContext should not be null", storedFieldsContext); + assertEquals("Field names should match", fieldNames, storedFieldsContext.fieldNames()); + assertTrue("FetchFields should be true", storedFieldsContext.fetchFields()); + } + + public void testFromProtoWithEmptyFieldList() throws IOException { + // Call the method under test with an empty list + StoredFieldsContext storedFieldsContext = StoredFieldsContextProtoUtils.fromProto(Collections.emptyList()); + + // Verify the result + assertNull("StoredFieldsContext should be null for empty list", storedFieldsContext); + } + + public void testFromProtoWithNullFieldList() throws IOException { + // Call the method under test with null + StoredFieldsContext storedFieldsContext = StoredFieldsContextProtoUtils.fromProto(null); + + // Verify the result + assertNull("StoredFieldsContext should be null for null list", storedFieldsContext); + } + + public void testFromProtoWithSingleField() throws IOException { + // Create a list with a single field name + List fieldNames = Collections.singletonList("single_field"); + + // Call the method under test + StoredFieldsContext storedFieldsContext = StoredFieldsContextProtoUtils.fromProto(fieldNames); + + // Verify the result + assertNotNull("StoredFieldsContext should not be null", storedFieldsContext); + assertEquals("Field names should match", fieldNames, storedFieldsContext.fieldNames()); + assertEquals("Should have 1 field", 1, storedFieldsContext.fieldNames().size()); + } + + public void testFromProtoRequestWithStoredFields() { + // Create a SearchRequest with stored fields + List fieldNames = Arrays.asList("field1", "field2", "field3"); + SearchRequest searchRequest = SearchRequest.newBuilder().addAllStoredFields(fieldNames).build(); + + // Call the method under test + StoredFieldsContext storedFieldsContext = StoredFieldsContextProtoUtils.fromProtoRequest(searchRequest); + + // Verify the result + assertNotNull("StoredFieldsContext should not be null", storedFieldsContext); + assertEquals("Field names should match", fieldNames, storedFieldsContext.fieldNames()); + assertTrue("FetchFields should be true", storedFieldsContext.fetchFields()); + } + + public void testFromProtoRequestWithNoStoredFields() { + // Create a SearchRequest with no stored fields + SearchRequest searchRequest = SearchRequest.newBuilder().build(); + + // Call the method under test + StoredFieldsContext storedFieldsContext = StoredFieldsContextProtoUtils.fromProtoRequest(searchRequest); + + // Verify the result + assertNull("StoredFieldsContext should be null for request with no stored fields", storedFieldsContext); + } + + public void testFromProtoRequestWithEmptyStoredFields() { + // Create a SearchRequest with empty stored fields list + 
SearchRequest searchRequest = SearchRequest.newBuilder().addAllStoredFields(Collections.emptyList()).build(); + + // Call the method under test + StoredFieldsContext storedFieldsContext = StoredFieldsContextProtoUtils.fromProtoRequest(searchRequest); + + // Verify the result + assertNull("StoredFieldsContext should be null for request with empty stored fields", storedFieldsContext); + } + + public void testFromProtoWithSpecialFields() throws IOException { + // Create a list with special field names like _source, _id, etc. + List fieldNames = Arrays.asList("_id", "_source", "_routing", "_field_names"); + + // Call the method under test + StoredFieldsContext storedFieldsContext = StoredFieldsContextProtoUtils.fromProto(fieldNames); + + // Verify the result + assertNotNull("StoredFieldsContext should not be null", storedFieldsContext); + assertEquals("Field names should match", fieldNames, storedFieldsContext.fieldNames()); + assertEquals("Should have 4 fields", 4, storedFieldsContext.fieldNames().size()); + } + + public void testFromProtoWithLargeNumberOfFields() throws IOException { + // Create a list with a large number of field names + List fieldNames = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + fieldNames.add("field" + i); + } + + // Call the method under test + StoredFieldsContext storedFieldsContext = StoredFieldsContextProtoUtils.fromProto(fieldNames); + + // Verify the result + assertNotNull("StoredFieldsContext should not be null", storedFieldsContext); + assertEquals("Field names should match", fieldNames, storedFieldsContext.fieldNames()); + assertEquals("Should have 100 fields", 100, storedFieldsContext.fieldNames().size()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchAllQueryBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchAllQueryBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..186f54f09a51f --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchAllQueryBuilderProtoUtilsTests.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.protobufs.MatchAllQuery; +import org.opensearch.test.OpenSearchTestCase; + +public class MatchAllQueryBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithDefaultValues() { + // Create a protobuf MatchAllQuery with default values + MatchAllQuery matchAllQueryProto = MatchAllQuery.newBuilder().build(); + + // Call the method under test + MatchAllQueryBuilder matchAllQueryBuilder = MatchAllQueryBuilderProtoUtils.fromProto(matchAllQueryProto); + + // Verify the result + assertNotNull("MatchAllQueryBuilder should not be null", matchAllQueryBuilder); + assertEquals("Boost should be default", 1.0f, matchAllQueryBuilder.boost(), 0.0f); + assertNull("Query name should be null", matchAllQueryBuilder.queryName()); + } + + public void testFromProtoWithBoost() { + // Create a protobuf MatchAllQuery with boost + MatchAllQuery matchAllQueryProto = MatchAllQuery.newBuilder().setBoost(2.5f).build(); + + // Call the method under test + MatchAllQueryBuilder matchAllQueryBuilder = MatchAllQueryBuilderProtoUtils.fromProto(matchAllQueryProto); + + // Verify the result + assertNotNull("MatchAllQueryBuilder should not be null", matchAllQueryBuilder); + assertEquals("Boost should match", 2.5f, matchAllQueryBuilder.boost(), 0.0f); + assertNull("Query name should be null", matchAllQueryBuilder.queryName()); + } + + public void testFromProtoWithName() { + // Create a protobuf MatchAllQuery with name + MatchAllQuery matchAllQueryProto = MatchAllQuery.newBuilder().setName("test_query").build(); + + // Call the method under test + MatchAllQueryBuilder matchAllQueryBuilder = MatchAllQueryBuilderProtoUtils.fromProto(matchAllQueryProto); + + // Verify the result + assertNotNull("MatchAllQueryBuilder should not be null", matchAllQueryBuilder); + assertEquals("Boost should be default", 1.0f, matchAllQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", matchAllQueryBuilder.queryName()); + } + + public void testFromProtoWithBoostAndName() { + // Create a protobuf MatchAllQuery with boost and name + MatchAllQuery matchAllQueryProto = MatchAllQuery.newBuilder().setBoost(3.0f).setName("test_query").build(); + + // Call the method under test + MatchAllQueryBuilder matchAllQueryBuilder = MatchAllQueryBuilderProtoUtils.fromProto(matchAllQueryProto); + + // Verify the result + assertNotNull("MatchAllQueryBuilder should not be null", matchAllQueryBuilder); + assertEquals("Boost should match", 3.0f, matchAllQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", matchAllQueryBuilder.queryName()); + } + + public void testFromProtoWithNullInput() { + // Call the method under test with null input, should throw NullPointerException + NullPointerException exception = expectThrows(NullPointerException.class, () -> MatchAllQueryBuilderProtoUtils.fromProto(null)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchNoneQueryBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchNoneQueryBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..8149319241479 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/MatchNoneQueryBuilderProtoUtilsTests.java @@ -0,0 
+1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.index.query.MatchNoneQueryBuilder; +import org.opensearch.protobufs.MatchNoneQuery; +import org.opensearch.test.OpenSearchTestCase; + +public class MatchNoneQueryBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithDefaultValues() { + // Create a protobuf MatchNoneQuery with default values + MatchNoneQuery matchNoneQueryProto = MatchNoneQuery.newBuilder().build(); + + // Call the method under test + MatchNoneQueryBuilder matchNoneQueryBuilder = MatchNoneQueryBuilderProtoUtils.fromProto(matchNoneQueryProto); + + // Verify the result + assertNotNull("MatchNoneQueryBuilder should not be null", matchNoneQueryBuilder); + assertEquals("Boost should be default", 1.0f, matchNoneQueryBuilder.boost(), 0.0f); + assertNull("Query name should be null", matchNoneQueryBuilder.queryName()); + } + + public void testFromProtoWithBoost() { + // Create a protobuf MatchNoneQuery with boost + MatchNoneQuery matchNoneQueryProto = MatchNoneQuery.newBuilder().setBoost(2.5f).build(); + + // Call the method under test + MatchNoneQueryBuilder matchNoneQueryBuilder = MatchNoneQueryBuilderProtoUtils.fromProto(matchNoneQueryProto); + + // Verify the result + assertNotNull("MatchNoneQueryBuilder should not be null", matchNoneQueryBuilder); + assertEquals("Boost should match", 2.5f, matchNoneQueryBuilder.boost(), 0.0f); + assertNull("Query name should be null", matchNoneQueryBuilder.queryName()); + } + + public void testFromProtoWithName() { + // Create a protobuf MatchNoneQuery with name + MatchNoneQuery matchNoneQueryProto = MatchNoneQuery.newBuilder().setName("test_query").build(); + + // Call the method under test + MatchNoneQueryBuilder matchNoneQueryBuilder = MatchNoneQueryBuilderProtoUtils.fromProto(matchNoneQueryProto); + + // Verify the result + assertNotNull("MatchNoneQueryBuilder should not be null", matchNoneQueryBuilder); + assertEquals("Boost should be default", 1.0f, matchNoneQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", matchNoneQueryBuilder.queryName()); + } + + public void testFromProtoWithBoostAndName() { + // Create a protobuf MatchNoneQuery with boost and name + MatchNoneQuery matchNoneQueryProto = MatchNoneQuery.newBuilder().setBoost(3.0f).setName("test_query").build(); + + // Call the method under test + MatchNoneQueryBuilder matchNoneQueryBuilder = MatchNoneQueryBuilderProtoUtils.fromProto(matchNoneQueryProto); + + // Verify the result + assertNotNull("MatchNoneQueryBuilder should not be null", matchNoneQueryBuilder); + assertEquals("Boost should match", 3.0f, matchNoneQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", matchNoneQueryBuilder.queryName()); + } + + public void testFromProtoWithNullInput() { + // Call the method under test with null input, should throw NullPointerException + NullPointerException exception = expectThrows(NullPointerException.class, () -> MatchNoneQueryBuilderProtoUtils.fromProto(null)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermQueryBuilderProtoUtilsTests.java 
b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermQueryBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..583bdb920726e --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermQueryBuilderProtoUtilsTests.java @@ -0,0 +1,306 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.protobufs.FieldValue; +import org.opensearch.protobufs.GeneralNumber; +import org.opensearch.protobufs.ObjectMap; +import org.opensearch.protobufs.TermQuery; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +public class TermQueryBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithStringValue() { + // Create a protobuf TermQuery with string value + TermQuery termQuery = TermQuery.newBuilder() + .setName("test_query") + .setBoost(2.0f) + .setValue(FieldValue.newBuilder().setStringValue("test_value").build()) + .build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test + TermQueryBuilder termQueryBuilder = TermQueryBuilderProtoUtils.fromProto(termQueryProto); + + // Verify the result + assertNotNull("TermQueryBuilder should not be null", termQueryBuilder); + assertEquals("Field name should match", "test_field", termQueryBuilder.fieldName()); + assertEquals("Value should match", "test_value", termQueryBuilder.value()); + assertEquals("Boost should match", 2.0f, termQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", termQueryBuilder.queryName()); + } + + public void testFromProtoWithNumberValue() { + // Create a protobuf TermQuery with number value + TermQuery termQuery = TermQuery.newBuilder() + .setName("test_query") + .setBoost(2.0f) + .setValue(FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setFloatValue(10.5f).build()).build()) + .build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test + TermQueryBuilder termQueryBuilder = TermQueryBuilderProtoUtils.fromProto(termQueryProto); + + // Verify the result + assertNotNull("TermQueryBuilder should not be null", termQueryBuilder); + assertEquals("Field name should match", "test_field", termQueryBuilder.fieldName()); + assertEquals("Value should match", 10.5f, termQueryBuilder.value()); + assertEquals("Boost should match", 2.0f, termQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", termQueryBuilder.queryName()); + } + + public void testFromProtoWithBooleanValue() { + // Create a protobuf TermQuery with boolean value + TermQuery termQuery = TermQuery.newBuilder() + .setName("test_query") + .setBoost(2.0f) + .setValue(FieldValue.newBuilder().setBoolValue(true).build()) + .build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test + TermQueryBuilder termQueryBuilder = 
TermQueryBuilderProtoUtils.fromProto(termQueryProto); + + // Verify the result + assertNotNull("TermQueryBuilder should not be null", termQueryBuilder); + assertEquals("Field name should match", "test_field", termQueryBuilder.fieldName()); + assertEquals("Value should match", true, termQueryBuilder.value()); + assertEquals("Boost should match", 2.0f, termQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", termQueryBuilder.queryName()); + } + + public void testFromProtoWithObjectMapValue() { + // Create a protobuf TermQuery with object map value + Map objectMapValues = new HashMap<>(); + objectMapValues.put("key1", "value1"); + objectMapValues.put("key2", "value2"); + + ObjectMap.Builder objectMapBuilder = ObjectMap.newBuilder(); + for (Map.Entry entry : objectMapValues.entrySet()) { + objectMapBuilder.putFields(entry.getKey(), ObjectMap.Value.newBuilder().setString(entry.getValue()).build()); + } + + TermQuery termQuery = TermQuery.newBuilder() + .setName("test_query") + .setBoost(2.0f) + .setValue(FieldValue.newBuilder().setObjectMap(objectMapBuilder.build()).build()) + .build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test + TermQueryBuilder termQueryBuilder = TermQueryBuilderProtoUtils.fromProto(termQueryProto); + + // Verify the result + assertNotNull("TermQueryBuilder should not be null", termQueryBuilder); + assertEquals("Field name should match", "test_field", termQueryBuilder.fieldName()); + assertTrue("Value should be a Map", termQueryBuilder.value() instanceof Map); + @SuppressWarnings("unchecked") + Map value = (Map) termQueryBuilder.value(); + assertEquals("Map should have 2 entries", 2, value.size()); + assertEquals("Map entry 1 should match", "value1", value.get("key1")); + assertEquals("Map entry 2 should match", "value2", value.get("key2")); + assertEquals("Boost should match", 2.0f, termQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", termQueryBuilder.queryName()); + } + + public void testFromProtoWithDefaultValues() { + // Create a protobuf TermQuery with minimal values + TermQuery termQuery = TermQuery.newBuilder().setValue(FieldValue.newBuilder().setStringValue("test_value").build()).build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test + TermQueryBuilder termQueryBuilder = TermQueryBuilderProtoUtils.fromProto(termQueryProto); + + // Verify the result + assertNotNull("TermQueryBuilder should not be null", termQueryBuilder); + assertEquals("Field name should match", "test_field", termQueryBuilder.fieldName()); + assertEquals("Value should match", "test_value", termQueryBuilder.value()); + assertEquals("Boost should be default", 1.0f, termQueryBuilder.boost(), 0.0f); + assertNull("Query name should be null", termQueryBuilder.queryName()); + } + + public void testFromProtoWithInvalidFieldValueType() { + // Create a protobuf TermQuery with invalid field value type + TermQuery termQuery = TermQuery.newBuilder() + .setValue(FieldValue.newBuilder().build()) // No value set + .build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + 
IllegalArgumentException.class, + () -> TermQueryBuilderProtoUtils.fromProto(termQueryProto) + ); + + assertTrue( + "Exception message should mention field value not recognized", + exception.getMessage().contains("field value not recognized") + ); + } + + public void testFromProtoWithTooManyElements() { + // Create a map with too many elements + Map termQueryProto = new HashMap<>(); + termQueryProto.put("field1", TermQuery.newBuilder().build()); + termQueryProto.put("field2", TermQuery.newBuilder().build()); + + // Call the method under test, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> TermQueryBuilderProtoUtils.fromProto(termQueryProto) + ); + + assertTrue("Exception message should mention can only have 1 element", exception.getMessage().contains("can only have 1 element")); + } + + public void testFromProtoWithInt32Value() { + // Create a protobuf TermQuery with int32 value + TermQuery termQuery = TermQuery.newBuilder() + .setName("test_query") + .setBoost(2.0f) + .setValue(FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setInt32Value(42).build()).build()) + .build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test + TermQueryBuilder termQueryBuilder = TermQueryBuilderProtoUtils.fromProto(termQueryProto); + + // Verify the result + assertNotNull("TermQueryBuilder should not be null", termQueryBuilder); + assertEquals("Field name should match", "test_field", termQueryBuilder.fieldName()); + assertEquals("Value should match", 42, termQueryBuilder.value()); + assertEquals("Boost should match", 2.0f, termQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", termQueryBuilder.queryName()); + } + + public void testFromProtoWithInt64Value() { + // Create a protobuf TermQuery with int64 value + TermQuery termQuery = TermQuery.newBuilder() + .setName("test_query") + .setBoost(2.0f) + .setValue( + FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setInt64Value(9223372036854775807L).build()).build() + ) + .build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test + TermQueryBuilder termQueryBuilder = TermQueryBuilderProtoUtils.fromProto(termQueryProto); + + // Verify the result + assertNotNull("TermQueryBuilder should not be null", termQueryBuilder); + assertEquals("Field name should match", "test_field", termQueryBuilder.fieldName()); + assertEquals("Value should match", 9223372036854775807L, termQueryBuilder.value()); + assertEquals("Boost should match", 2.0f, termQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", termQueryBuilder.queryName()); + } + + public void testFromProtoWithDoubleValue() { + // Create a protobuf TermQuery with double value + TermQuery termQuery = TermQuery.newBuilder() + .setName("test_query") + .setBoost(2.0f) + .setValue(FieldValue.newBuilder().setGeneralNumber(GeneralNumber.newBuilder().setDoubleValue(3.14159).build()).build()) + .build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test + TermQueryBuilder termQueryBuilder = TermQueryBuilderProtoUtils.fromProto(termQueryProto); + + // Verify the result + 
assertNotNull("TermQueryBuilder should not be null", termQueryBuilder); + assertEquals("Field name should match", "test_field", termQueryBuilder.fieldName()); + assertEquals("Value should match", 3.14159, termQueryBuilder.value()); + assertEquals("Boost should match", 2.0f, termQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", termQueryBuilder.queryName()); + } + + public void testFromProtoWithCaseInsensitive() { + // Create a protobuf TermQuery with case insensitive flag + TermQuery termQuery = TermQuery.newBuilder() + .setName("test_query") + .setBoost(2.0f) + .setValue(FieldValue.newBuilder().setStringValue("test_value").build()) + .setCaseInsensitive(true) + .build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test + TermQueryBuilder termQueryBuilder = TermQueryBuilderProtoUtils.fromProto(termQueryProto); + + // Verify the result + assertNotNull("TermQueryBuilder should not be null", termQueryBuilder); + assertEquals("Field name should match", "test_field", termQueryBuilder.fieldName()); + assertEquals("Value should match", "test_value", termQueryBuilder.value()); + assertEquals("Boost should match", 2.0f, termQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", termQueryBuilder.queryName()); + assertTrue("Case insensitive should be true", termQueryBuilder.caseInsensitive()); + } + + public void testFromProtoWithUnsupportedGeneralNumberType() { + // Create a protobuf TermQuery with unsupported general number type + TermQuery termQuery = TermQuery.newBuilder() + .setValue( + FieldValue.newBuilder() + .setGeneralNumber(GeneralNumber.newBuilder().build()) // No value set + .build() + ) + .build(); + + // Create a map with field name and TermQuery + Map termQueryProto = new HashMap<>(); + termQueryProto.put("test_field", termQuery); + + // Call the method under test, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> TermQueryBuilderProtoUtils.fromProto(termQueryProto) + ); + + assertTrue( + "Exception message should mention unsupported general number type", + exception.getMessage().contains("Unsupported general nunber type") + ); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..8dbf593a0a762 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtilsTests.java @@ -0,0 +1,112 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.plugin.transport.grpc.proto.request.search.sort; + +import org.opensearch.protobufs.FieldWithOrderMap; +import org.opensearch.protobufs.ScoreSort; +import org.opensearch.search.sort.FieldSortBuilder; +import org.opensearch.search.sort.ScoreSortBuilder; +import org.opensearch.search.sort.SortBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.List; + +public class FieldSortBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithEmptyMap() { + // Create an empty FieldWithOrderMap + FieldWithOrderMap fieldWithOrderMap = FieldWithOrderMap.newBuilder().build(); + + // Create a list to populate + List<SortBuilder<?>> sortBuilders = new ArrayList<>(); + + // Call the method under test + FieldSortBuilderProtoUtils.fromProto(sortBuilders, fieldWithOrderMap); + + // Verify the result + assertTrue("SortBuilders list should be empty", sortBuilders.isEmpty()); + } + + public void testFromProtoWithSingleField() { + // Create a FieldWithOrderMap with a single field + FieldWithOrderMap.Builder builder = FieldWithOrderMap.newBuilder(); + builder.putFieldWithOrderMap("field1", ScoreSort.newBuilder().setOrder(ScoreSort.SortOrder.SORT_ORDER_ASC).build()); + FieldWithOrderMap fieldWithOrderMap = builder.build(); + + // Create a list to populate + List<SortBuilder<?>> sortBuilders = new ArrayList<>(); + + // Call the method under test + FieldSortBuilderProtoUtils.fromProto(sortBuilders, fieldWithOrderMap); + + // Verify the result + assertEquals("SortBuilders list should have 1 element", 1, sortBuilders.size()); + assertTrue("SortBuilder should be a FieldSortBuilder", sortBuilders.get(0) instanceof FieldSortBuilder); + FieldSortBuilder fieldSortBuilder = (FieldSortBuilder) sortBuilders.get(0); + assertEquals("Field name should match", "field1", fieldSortBuilder.getFieldName()); + assertEquals("Sort order should be ASC", SortOrder.ASC, fieldSortBuilder.order()); + } + + public void testFromProtoWithMultipleFields() { + // Create a FieldWithOrderMap with multiple fields + FieldWithOrderMap.Builder builder = FieldWithOrderMap.newBuilder(); + builder.putFieldWithOrderMap("field1", ScoreSort.newBuilder().setOrder(ScoreSort.SortOrder.SORT_ORDER_ASC).build()); + builder.putFieldWithOrderMap("field2", ScoreSort.newBuilder().setOrder(ScoreSort.SortOrder.SORT_ORDER_DESC).build()); + FieldWithOrderMap fieldWithOrderMap = builder.build(); + + // Create a list to populate + List<SortBuilder<?>> sortBuilders = new ArrayList<>(); + + // Call the method under test + FieldSortBuilderProtoUtils.fromProto(sortBuilders, fieldWithOrderMap); + + // Verify the result + assertEquals("SortBuilders list should have 2 elements", 2, sortBuilders.size()); + + // Since the order of entries in a map is not guaranteed, we need to check both fields + boolean foundField1 = false; + boolean foundField2 = false; + + for (SortBuilder<?> sortBuilder : sortBuilders) { + assertTrue("SortBuilder should be a FieldSortBuilder", sortBuilder instanceof FieldSortBuilder); + FieldSortBuilder fieldSortBuilder = (FieldSortBuilder) sortBuilder; + + if (fieldSortBuilder.getFieldName().equals("field1")) { + foundField1 = true; + assertEquals("Sort order for field1 should be ASC", SortOrder.ASC, fieldSortBuilder.order()); + } else if (fieldSortBuilder.getFieldName().equals("field2")) { + foundField2 = true; + assertEquals("Sort order for field2 should be DESC", SortOrder.DESC, fieldSortBuilder.order()); + } + } + + assertTrue("Should have found field1",
foundField1); + assertTrue("Should have found field2", foundField2); + } + + public void testFromProtoWithScoreField() { + // Create a FieldWithOrderMap with the special "score" field + FieldWithOrderMap.Builder builder = FieldWithOrderMap.newBuilder(); + builder.putFieldWithOrderMap("score", ScoreSort.newBuilder().setOrder(ScoreSort.SortOrder.SORT_ORDER_DESC).build()); + FieldWithOrderMap fieldWithOrderMap = builder.build(); + + // Create a list to populate + List<SortBuilder<?>> sortBuilders = new ArrayList<>(); + + // Call the method under test + FieldSortBuilderProtoUtils.fromProto(sortBuilders, fieldWithOrderMap); + + // Verify the result + assertEquals("SortBuilders list should have 1 element", 1, sortBuilders.size()); + assertTrue("SortBuilder should be a ScoreSortBuilder", sortBuilders.get(0) instanceof ScoreSortBuilder); + ScoreSortBuilder scoreSortBuilder = (ScoreSortBuilder) sortBuilders.get(0); + assertEquals("Sort order should be DESC", SortOrder.DESC, scoreSortBuilder.order()); + } +}
diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtilsTests.java new file mode 100644 index 0000000000000..1f31780840057 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtilsTests.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search.sort; + +import org.opensearch.protobufs.GeoDistanceSort; +import org.opensearch.protobufs.ScoreSort; +import org.opensearch.protobufs.ScriptSort; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.OpenSearchTestCase; + +public class SortOrderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoScoreSortAsc() { + // Test ASC order + SortOrder sortOrder = SortOrderProtoUtils.fromProto(ScoreSort.SortOrder.SORT_ORDER_ASC); + assertEquals("Sort order should be ASC", SortOrder.ASC, sortOrder); + } + + public void testFromProtoScoreSortDesc() { + // Test DESC order + SortOrder sortOrder = SortOrderProtoUtils.fromProto(ScoreSort.SortOrder.SORT_ORDER_DESC); + assertEquals("Sort order should be DESC", SortOrder.DESC, sortOrder); + } + + public void testFromProtoScoreSortUnspecified() { + // Test UNSPECIFIED order (should throw exception) + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> SortOrderProtoUtils.fromProto(ScoreSort.SortOrder.SORT_ORDER_UNSPECIFIED) + ); + assertTrue( + "Exception message should mention 'Must provide oneof sort combinations'", + exception.getMessage().contains("Must provide oneof sort combinations") + ); + } + + public void testFromProtoGeoDistanceSortAsc() { + // Test ASC order + SortOrder sortOrder = SortOrderProtoUtils.fromProto(GeoDistanceSort.SortOrder.SORT_ORDER_ASC); + assertEquals("Sort order should be ASC", SortOrder.ASC, sortOrder); + } + + public void testFromProtoGeoDistanceSortDesc() { + // Test DESC order + SortOrder sortOrder = SortOrderProtoUtils.fromProto(GeoDistanceSort.SortOrder.SORT_ORDER_DESC); + assertEquals("Sort order should be DESC", SortOrder.DESC, sortOrder); + } + + public void
testFromProtoGeoDistanceSortUnspecified() { + // Test UNSPECIFIED order (should throw exception) + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> SortOrderProtoUtils.fromProto(GeoDistanceSort.SortOrder.SORT_ORDER_UNSPECIFIED) + ); + assertTrue( + "Exception message should mention 'Must provide oneof sort combinations'", + exception.getMessage().contains("Must provide oneof sort combinations") + ); + } + + public void testFromProtoScriptSortAsc() { + // Test ASC order + SortOrder sortOrder = SortOrderProtoUtils.fromProto(ScriptSort.SortOrder.SORT_ORDER_ASC); + assertEquals("Sort order should be ASC", SortOrder.ASC, sortOrder); + } + + public void testFromProtoScriptSortDesc() { + // Test DESC order + SortOrder sortOrder = SortOrderProtoUtils.fromProto(ScriptSort.SortOrder.SORT_ORDER_DESC); + assertEquals("Sort order should be DESC", SortOrder.DESC, sortOrder); + } + + public void testFromProtoScriptSortUnspecified() { + // Test UNSPECIFIED order (should throw exception) + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> SortOrderProtoUtils.fromProto(ScriptSort.SortOrder.SORT_ORDER_UNSPECIFIED) + ); + assertTrue( + "Exception message should mention 'Must provide oneof sort combinations'", + exception.getMessage().contains("Must provide oneof sort combinations") + ); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/SuggestBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/SuggestBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..5ff74dc2772c1 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/SuggestBuilderProtoUtilsTests.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.search.suggest; + +import org.opensearch.protobufs.Suggester; +import org.opensearch.search.suggest.SuggestBuilder; +import org.opensearch.test.OpenSearchTestCase; + +public class SuggestBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithEmptySuggester() { + // Create an empty Suggester proto + Suggester suggesterProto = Suggester.newBuilder().build(); + + // Call the method under test + SuggestBuilder suggestBuilder = SuggestBuilderProtoUtils.fromProto(suggesterProto); + + // Verify the result + assertNotNull("SuggestBuilder should not be null", suggestBuilder); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/TermSuggestionBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/TermSuggestionBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..d8b5d319c0458 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/suggest/TermSuggestionBuilderProtoUtilsTests.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search.suggest; + +import org.opensearch.protobufs.SearchRequest; +import org.opensearch.search.suggest.term.TermSuggestionBuilder; +import org.opensearch.test.OpenSearchTestCase; + +public class TermSuggestionBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testResolveWithAlwaysMode() { + // Call the method under test with ALWAYS mode + TermSuggestionBuilder.SuggestMode result = TermSuggestionBuilderProtoUtils.resolve(SearchRequest.SuggestMode.SUGGEST_MODE_ALWAYS); + + // Verify the result + assertEquals("SuggestMode should be ALWAYS", TermSuggestionBuilder.SuggestMode.ALWAYS, result); + } + + public void testResolveWithMissingMode() { + // Call the method under test with MISSING mode + TermSuggestionBuilder.SuggestMode result = TermSuggestionBuilderProtoUtils.resolve(SearchRequest.SuggestMode.SUGGEST_MODE_MISSING); + + // Verify the result + assertEquals("SuggestMode should be MISSING", TermSuggestionBuilder.SuggestMode.MISSING, result); + } + + public void testResolveWithPopularMode() { + // Call the method under test with POPULAR mode + TermSuggestionBuilder.SuggestMode result = TermSuggestionBuilderProtoUtils.resolve(SearchRequest.SuggestMode.SUGGEST_MODE_POPULAR); + + // Verify the result + assertEquals("SuggestMode should be POPULAR", TermSuggestionBuilder.SuggestMode.POPULAR, result); + } + + public void testResolveWithInvalidMode() { + // Call the method under test with UNRECOGNIZED mode, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> TermSuggestionBuilderProtoUtils.resolve(SearchRequest.SuggestMode.UNRECOGNIZED) + ); + + // Verify the exception message + assertTrue("Exception message should mention invalid suggest_mode", exception.getMessage().contains("Invalid suggest_mode")); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtilsTests.java new file mode 100644 index 0000000000000..78fd640710ad7 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtilsTests.java @@ -0,0 +1,192 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.common; + +import org.opensearch.protobufs.FieldValue; +import org.opensearch.protobufs.GeneralNumber; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +public class FieldValueProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithInteger() { + Integer intValue = 42; + FieldValue fieldValue = FieldValueProtoUtils.toProto(intValue); + + assertNotNull("FieldValue should not be null", fieldValue); + assertTrue("FieldValue should have general number", fieldValue.hasGeneralNumber()); + + GeneralNumber generalNumber = fieldValue.getGeneralNumber(); + assertTrue("GeneralNumber should have int32 value", generalNumber.hasInt32Value()); + assertEquals("Int32 value should match", 42, generalNumber.getInt32Value()); + } + + public void testToProtoWithLong() { + Long longValue = 9223372036854775807L; // Max long value + FieldValue fieldValue = FieldValueProtoUtils.toProto(longValue); + + assertNotNull("FieldValue should not be null", fieldValue); + assertTrue("FieldValue should have general number", fieldValue.hasGeneralNumber()); + + GeneralNumber generalNumber = fieldValue.getGeneralNumber(); + assertTrue("GeneralNumber should have int64 value", generalNumber.hasInt64Value()); + assertEquals("Int64 value should match", 9223372036854775807L, generalNumber.getInt64Value()); + } + + public void testToProtoWithDouble() { + Double doubleValue = 3.14159; + FieldValue fieldValue = FieldValueProtoUtils.toProto(doubleValue); + + assertNotNull("FieldValue should not be null", fieldValue); + assertTrue("FieldValue should have general number", fieldValue.hasGeneralNumber()); + + GeneralNumber generalNumber = fieldValue.getGeneralNumber(); + assertTrue("GeneralNumber should have double value", generalNumber.hasDoubleValue()); + assertEquals("Double value should match", 3.14159, generalNumber.getDoubleValue(), 0.0); + } + + public void testToProtoWithFloat() { + Float floatValue = 2.71828f; + FieldValue fieldValue = FieldValueProtoUtils.toProto(floatValue); + + assertNotNull("FieldValue should not be null", fieldValue); + assertTrue("FieldValue should have general number", fieldValue.hasGeneralNumber()); + + GeneralNumber generalNumber = fieldValue.getGeneralNumber(); + assertTrue("GeneralNumber should have float value", generalNumber.hasFloatValue()); + assertEquals("Float value should match", 2.71828f, generalNumber.getFloatValue(), 0.0f); + } + + public void testToProtoWithString() { + String stringValue = "test string"; + FieldValue fieldValue = FieldValueProtoUtils.toProto(stringValue); + + assertNotNull("FieldValue should not be null", fieldValue); + assertTrue("FieldValue should have string value", fieldValue.hasStringValue()); + assertEquals("String value should match", "test string", fieldValue.getStringValue()); + } + + public void testToProtoWithBoolean() { + // Test with true + Boolean trueValue = true; + FieldValue trueFieldValue = FieldValueProtoUtils.toProto(trueValue); + + assertNotNull("FieldValue should not be null", trueFieldValue); + assertTrue("FieldValue should have bool value", trueFieldValue.hasBoolValue()); + assertTrue("Bool value should be true", trueFieldValue.getBoolValue()); + + // Test with false + Boolean falseValue = false; + FieldValue falseFieldValue = FieldValueProtoUtils.toProto(falseValue); + + assertNotNull("FieldValue should not be null", falseFieldValue); + assertTrue("FieldValue should have bool value", 
falseFieldValue.hasBoolValue()); + assertFalse("Bool value should be false", falseFieldValue.getBoolValue()); + } + + public void testToProtoWithEnum() { + // Use a test enum + TestEnum enumValue = TestEnum.TEST_VALUE; + FieldValue fieldValue = FieldValueProtoUtils.toProto(enumValue); + + assertNotNull("FieldValue should not be null", fieldValue); + assertTrue("FieldValue should have string value", fieldValue.hasStringValue()); + assertEquals("String value should match enum toString", "TEST_VALUE", fieldValue.getStringValue()); + } + + public void testToProtoWithMap() { + Map map = new HashMap<>(); + map.put("string", "value"); + map.put("integer", 42); + map.put("boolean", true); + + FieldValue fieldValue = FieldValueProtoUtils.toProto(map); + + assertNotNull("FieldValue should not be null", fieldValue); + assertTrue("FieldValue should have object map", fieldValue.hasObjectMap()); + + org.opensearch.protobufs.ObjectMap objectMap = fieldValue.getObjectMap(); + assertEquals("ObjectMap should have 3 fields", 3, objectMap.getFieldsCount()); + + // Check string field + assertTrue("String field should exist", objectMap.containsFields("string")); + assertTrue("String field should have string value", objectMap.getFieldsOrThrow("string").hasString()); + assertEquals("String field should match", "value", objectMap.getFieldsOrThrow("string").getString()); + + // Check integer field + assertTrue("Integer field should exist", objectMap.containsFields("integer")); + assertTrue("Integer field should have int32 value", objectMap.getFieldsOrThrow("integer").hasInt32()); + assertEquals("Integer field should match", 42, objectMap.getFieldsOrThrow("integer").getInt32()); + + // Check boolean field + assertTrue("Boolean field should exist", objectMap.containsFields("boolean")); + assertTrue("Boolean field should have bool value", objectMap.getFieldsOrThrow("boolean").hasBool()); + assertTrue("Boolean field should be true", objectMap.getFieldsOrThrow("boolean").getBool()); + } + + public void testToProtoWithNestedMap() { + Map nestedMap = new HashMap<>(); + nestedMap.put("nested_string", "nested value"); + nestedMap.put("nested_integer", 99); + + Map outerMap = new HashMap<>(); + outerMap.put("outer_string", "outer value"); + outerMap.put("nested_map", nestedMap); + + FieldValue fieldValue = FieldValueProtoUtils.toProto(outerMap); + + assertNotNull("FieldValue should not be null", fieldValue); + assertTrue("FieldValue should have object map", fieldValue.hasObjectMap()); + + org.opensearch.protobufs.ObjectMap outerObjectMap = fieldValue.getObjectMap(); + assertEquals("Outer object map should have 2 fields", 2, outerObjectMap.getFieldsCount()); + + // Check outer string field + assertTrue("Outer string field should exist", outerObjectMap.containsFields("outer_string")); + assertTrue("Outer string field should have string value", outerObjectMap.getFieldsOrThrow("outer_string").hasString()); + assertEquals("Outer string field should match", "outer value", outerObjectMap.getFieldsOrThrow("outer_string").getString()); + + // Check nested map field + assertTrue("Nested map field should exist", outerObjectMap.containsFields("nested_map")); + assertTrue("Nested map field should have object map", outerObjectMap.getFieldsOrThrow("nested_map").hasObjectMap()); + + org.opensearch.protobufs.ObjectMap nestedObjectMap = outerObjectMap.getFieldsOrThrow("nested_map").getObjectMap(); + assertEquals("Nested object map should have 2 fields", 2, nestedObjectMap.getFieldsCount()); + + // Check nested string field + 
assertTrue("Nested string field should exist", nestedObjectMap.containsFields("nested_string")); + assertTrue("Nested string field should have string value", nestedObjectMap.getFieldsOrThrow("nested_string").hasString()); + assertEquals("Nested string field should match", "nested value", nestedObjectMap.getFieldsOrThrow("nested_string").getString()); + + // Check nested integer field + assertTrue("Nested integer field should exist", nestedObjectMap.containsFields("nested_integer")); + assertTrue("Nested integer field should have int32 value", nestedObjectMap.getFieldsOrThrow("nested_integer").hasInt32()); + assertEquals("Nested integer field should match", 99, nestedObjectMap.getFieldsOrThrow("nested_integer").getInt32()); + } + + public void testToProtoWithUnsupportedType() { + // Create an object of an unsupported type + Object unsupportedObject = new StringBuilder("unsupported"); + + // Call the method under test, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> FieldValueProtoUtils.toProto(unsupportedObject) + ); + + assertTrue("Exception message should mention cannot convert", exception.getMessage().contains("Cannot convert")); + } + + // Test enum for testing enum conversion + private enum TestEnum { + TEST_VALUE + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/OpenSearchExceptionProtoUtilsTests.java similarity index 99% rename from plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtilsTests.java rename to plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/OpenSearchExceptionProtoUtilsTests.java index 135fd0d094e1b..a22a930831c07 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/common/OpenSearchExceptionProtoUtilsTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/OpenSearchExceptionProtoUtilsTests.java @@ -16,6 +16,7 @@ import org.opensearch.common.breaker.ResponseLimitSettings; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.plugin.transport.grpc.proto.response.exceptions.opensearchexception.OpenSearchExceptionProtoUtils; import org.opensearch.protobufs.ErrorCause; import org.opensearch.protobufs.ObjectMap; import org.opensearch.protobufs.StringOrStringArray; @@ -151,7 +152,7 @@ public void testInnerToProtoWithBasicException() throws IOException { // Create a basic exception RuntimeException exception = new RuntimeException("Test exception"); - // Convert to Protocol Buffer using the protected method via reflection + // Convert to Protocol Buffer ErrorCause errorCause = OpenSearchExceptionProtoUtils.generateThrowableProto(exception); // Verify the conversion diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/DefaultShardOperationFailedExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/DefaultShardOperationFailedExceptionProtoUtilsTests.java new file mode 100644 index 
0000000000000..50f3980d8bef0 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/DefaultShardOperationFailedExceptionProtoUtilsTests.java @@ -0,0 +1,125 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions.shardoperationfailedexception; + +import org.opensearch.action.admin.indices.close.CloseIndexResponse; +import org.opensearch.action.admin.indices.readonly.AddIndexBlockResponse; +import org.opensearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.protobufs.ShardFailure; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DefaultShardOperationFailedExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithDefaultShardOperationFailedException() throws IOException { + // Create a real DefaultShardOperationFailedException + DefaultShardOperationFailedException exception = new DefaultShardOperationFailedException( + "test-index", + 1, + new RuntimeException("Test cause") + ); + + // Call the method under test + ShardFailure shardFailure = DefaultShardOperationFailedExceptionProtoUtils.toProto(exception); + + // Verify the result + assertNotNull("ShardFailure should not be null", shardFailure); + assertEquals("Shard ID should match", 1, shardFailure.getShard()); + assertEquals("Index should match", "test-index", shardFailure.getIndex()); + assertEquals("Status should match", RestStatus.INTERNAL_SERVER_ERROR.name(), shardFailure.getStatus()); + assertNotNull("Reason should not be null", shardFailure.getReason()); + } + + public void testToProtoWithAddIndexBlockResponseFailure() throws IOException { + // Create a real AddIndexBlockResponse.AddBlockShardResult.Failure + AddIndexBlockResponse.AddBlockShardResult.Failure exception = new AddIndexBlockResponse.AddBlockShardResult.Failure( + "test-index-2", + 2, + new RuntimeException("Test cause 2"), + "node-1" + ); + + // Call the method under test + ShardFailure shardFailure = DefaultShardOperationFailedExceptionProtoUtils.toProto(exception); + + // Verify the result + assertNotNull("ShardFailure should not be null", shardFailure); + assertEquals("Shard ID should match", 2, shardFailure.getShard()); + assertEquals("Index should match", "test-index-2", shardFailure.getIndex()); + assertEquals("Status should match", RestStatus.INTERNAL_SERVER_ERROR.name(), shardFailure.getStatus()); + assertEquals("Node should match", "node-1", shardFailure.getNode()); + assertNotNull("Reason should not be null", shardFailure.getReason()); + } + + public void testToProtoWithIndicesShardStoresResponseFailure() throws IOException { + // Create a real IndicesShardStoresResponse.Failure + IndicesShardStoresResponse.Failure exception = new IndicesShardStoresResponse.Failure( + "node-2", + "test-index-3", + 3, + new RuntimeException("Test cause 3") + ); + + // Call the method under test + ShardFailure shardFailure = DefaultShardOperationFailedExceptionProtoUtils.toProto(exception); + + // Verify the result + assertNotNull("ShardFailure should not be null", shardFailure); + assertEquals("Shard ID should match", 3, 
shardFailure.getShard()); + assertEquals("Index should match", "test-index-3", shardFailure.getIndex()); + assertEquals("Status should match", RestStatus.INTERNAL_SERVER_ERROR.name(), shardFailure.getStatus()); + assertEquals("Node should match", "node-2", shardFailure.getNode()); + assertNotNull("Reason should not be null", shardFailure.getReason()); + } + + public void testToProtoWithCloseIndexResponseFailure() throws IOException { + // Create a real CloseIndexResponse.ShardResult.Failure + CloseIndexResponse.ShardResult.Failure exception = new CloseIndexResponse.ShardResult.Failure( + "test-index-4", + 4, + new RuntimeException("Test cause 4"), + "node-3" + ); + + // Call the method under test + ShardFailure shardFailure = DefaultShardOperationFailedExceptionProtoUtils.toProto(exception); + + // Verify the result + assertNotNull("ShardFailure should not be null", shardFailure); + assertEquals("Shard ID should match", 4, shardFailure.getShard()); + assertEquals("Index should match", "test-index-4", shardFailure.getIndex()); + assertEquals("Status should match", RestStatus.INTERNAL_SERVER_ERROR.name(), shardFailure.getStatus()); + assertEquals("Node should match", "node-3", shardFailure.getNode()); + assertNotNull("Reason should not be null", shardFailure.getReason()); + } + + public void testToProtoWithNullNodeId() throws IOException { + // Create a real AddIndexBlockResponse.AddBlockShardResult.Failure with null nodeId + AddIndexBlockResponse.AddBlockShardResult.Failure exception = new AddIndexBlockResponse.AddBlockShardResult.Failure( + "test-index-5", + 5, + new RuntimeException("Test cause 5"), + null // null nodeId + ); + + // Call the method under test + ShardFailure shardFailure = DefaultShardOperationFailedExceptionProtoUtils.toProto(exception); + + // Verify the result + assertNotNull("ShardFailure should not be null", shardFailure); + assertEquals("Shard ID should match", 5, shardFailure.getShard()); + assertEquals("Index should match", "test-index-5", shardFailure.getIndex()); + assertEquals("Status should match", RestStatus.INTERNAL_SERVER_ERROR.name(), shardFailure.getStatus()); + assertFalse("Node should not be set", shardFailure.hasNode()); + assertNotNull("Reason should not be null", shardFailure.getReason()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardOperationFailedExceptionProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardOperationFailedExceptionProtoUtilsTests.java new file mode 100644 index 0000000000000..1b218bde073f7 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/exceptions/shardoperationfailedexception/ShardOperationFailedExceptionProtoUtilsTests.java @@ -0,0 +1,122 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.exceptions.shardoperationfailedexception; + +import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.protobufs.ShardFailure; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.snapshots.SnapshotShardFailure; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +import static org.mockito.Mockito.mock; + +public class ShardOperationFailedExceptionProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithShardSearchFailure() throws IOException { + + // Create a SearchShardTarget with a nodeId + ShardId shardId = new ShardId("test_index", "_na_", 1); + SearchShardTarget searchShardTarget = new SearchShardTarget("test_node", shardId, null, null); + + // Create a ShardSearchFailure + ShardSearchFailure shardSearchFailure = new ShardSearchFailure(new Exception("fake exception"), searchShardTarget); + + // Call the method under test + ShardFailure protoFailure = ShardOperationFailedExceptionProtoUtils.toProto(shardSearchFailure); + + // Verify the result + assertNotNull("Proto failure should not be null", protoFailure); + assertEquals("Index should match", "test_index", protoFailure.getIndex()); + assertEquals("Shard ID should match", 1, protoFailure.getShard()); + assertEquals("Node ID should match", "test_node", protoFailure.getNode()); + } + + public void testToProtoWithSnapshotShardFailure() throws IOException { + + // Create a SearchShardTarget with a nodeId + ShardId shardId = new ShardId("test_index", "_na_", 2); + + // Create a SnapshotShardFailure + SnapshotShardFailure shardSearchFailure = new SnapshotShardFailure("test_node", shardId, "Snapshot failed"); + + // Call the method under test + ShardFailure protoFailure = ShardOperationFailedExceptionProtoUtils.toProto(shardSearchFailure); + + // Verify the result + assertNotNull("Proto failure should not be null", protoFailure); + assertEquals("Index should match", "test_index", protoFailure.getIndex()); + assertEquals("Shard ID should match", 2, protoFailure.getShard()); + assertEquals("Node ID should match", "test_node", protoFailure.getNode()); + assertEquals("Status should match", "INTERNAL_SERVER_ERROR", protoFailure.getStatus()); + } + + public void testToProtoWithDefaultShardOperationFailedException() throws IOException { + // Create a mock DefaultShardOperationFailedException + DefaultShardOperationFailedException defaultShardOperationFailedException = new DefaultShardOperationFailedException( + "test_index", + 3, + new RuntimeException("Test exception") + ); + + // Call the method under test + ShardFailure protoFailure = ShardOperationFailedExceptionProtoUtils.toProto(defaultShardOperationFailedException); + + // Verify the result + assertNotNull("Proto failure should not be null", protoFailure); + assertEquals("Index should match", "test_index", protoFailure.getIndex()); + assertEquals("Shard ID should match", 3, protoFailure.getShard()); + assertEquals("Status should match", "INTERNAL_SERVER_ERROR", protoFailure.getStatus()); + } + + public void testToProtoWithReplicationResponseShardInfoFailure() throws IOException { + // Create a mock 
ReplicationResponse.ShardInfo.Failure + ShardId shardId = new ShardId("test_index", "_na_", 4); + ReplicationResponse.ShardInfo.Failure replicationResponseFailure = new ReplicationResponse.ShardInfo.Failure( + shardId, + "test_node", + new RuntimeException("Test exception"), + RestStatus.INTERNAL_SERVER_ERROR, + true + ); + + // Call the method under test + ShardFailure protoFailure = ShardOperationFailedExceptionProtoUtils.toProto(replicationResponseFailure); + + // Verify the result + assertNotNull("Proto failure should not be null", protoFailure); + assertEquals("Index should match", "test_index", protoFailure.getIndex()); + assertEquals("Shard ID should match", 4, protoFailure.getShard()); + assertTrue("Primary should be true", protoFailure.getPrimary()); + assertEquals("Node ID should match", "test_node", protoFailure.getNode()); + assertEquals("Status should match", "INTERNAL_SERVER_ERROR", protoFailure.getStatus()); + } + + public void testToProtoWithUnsupportedShardOperationFailedException() { + // Create a mock ShardOperationFailedException that is not one of the supported types + ShardOperationFailedException mockFailure = mock(ShardOperationFailedException.class); + + // Call the method under test, should throw UnsupportedOperationException + UnsupportedOperationException exception = expectThrows( + UnsupportedOperationException.class, + () -> ShardOperationFailedExceptionProtoUtils.toProto(mockFailure) + ); + + assertTrue( + "Exception message should mention unsupported ShardOperationFailedException", + exception.getMessage().contains("Unsupported ShardOperationFailedException") + ); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/HighlightFieldProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/HighlightFieldProtoUtilsTests.java new file mode 100644 index 0000000000000..f45b18075d1e5 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/HighlightFieldProtoUtilsTests.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.opensearch.core.common.text.Text; +import org.opensearch.protobufs.StringArray; +import org.opensearch.test.OpenSearchTestCase; + +public class HighlightFieldProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithEmptyFragments() { + Text[] emptyFragments = new Text[0]; + StringArray stringArray = HighlightFieldProtoUtils.toProto(emptyFragments); + + assertNotNull("StringArray should not be null", stringArray); + assertEquals("StringArray should be empty", 0, stringArray.getStringArrayCount()); + } + + public void testToProtoWithSingleFragment() { + Text[] singleFragment = new Text[] { new Text("highlight fragment") }; + StringArray stringArray = HighlightFieldProtoUtils.toProto(singleFragment); + + assertNotNull("StringArray should not be null", stringArray); + assertEquals("StringArray should have 1 element", 1, stringArray.getStringArrayCount()); + assertEquals("StringArray element should match", "highlight fragment", stringArray.getStringArray(0)); + } + + public void testToProtoWithMultipleFragments() { + Text[] multipleFragments = new Text[] { + new Text("first highlight fragment"), + new Text("second highlight fragment"), + new Text("third highlight fragment") }; + StringArray stringArray = HighlightFieldProtoUtils.toProto(multipleFragments); + + assertNotNull("StringArray should not be null", stringArray); + assertEquals("StringArray should have 3 elements", 3, stringArray.getStringArrayCount()); + assertEquals("First element should match", "first highlight fragment", stringArray.getStringArray(0)); + assertEquals("Second element should match", "second highlight fragment", stringArray.getStringArray(1)); + assertEquals("Third element should match", "third highlight fragment", stringArray.getStringArray(2)); + } + + public void testToProtoWithSpecialCharacters() { + Text[] specialCharFragments = new Text[] { + new Text("fragment with HTML tags"), + new Text("fragment with symbols: !@#$%^&*()"), + new Text("fragment with unicode: 你好, 世界") }; + StringArray stringArray = HighlightFieldProtoUtils.toProto(specialCharFragments); + + assertNotNull("StringArray should not be null", stringArray); + assertEquals("StringArray should have 3 elements", 3, stringArray.getStringArrayCount()); + assertEquals("First element should match", "fragment with HTML tags", stringArray.getStringArray(0)); + assertEquals("Second element should match", "fragment with symbols: !@#$%^&*()", stringArray.getStringArray(1)); + assertEquals("Third element should match", "fragment with unicode: 你好, 世界", stringArray.getStringArray(2)); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitNestedIdentityProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitNestedIdentityProtoUtilsTests.java new file mode 100644 index 0000000000000..79452871e1958 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitNestedIdentityProtoUtilsTests.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.opensearch.protobufs.NestedIdentity; +import org.opensearch.search.SearchHit; +import org.opensearch.test.OpenSearchTestCase; + +public class SearchHitNestedIdentityProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithBasicNestedIdentity() throws Exception { + // Create a SearchHit.NestedIdentity with basic fields + SearchHit.NestedIdentity nestedIdentity = new SearchHit.NestedIdentity("parent_field", 5, null); + + // Call the method under test + NestedIdentity protoNestedIdentity = SearchHitProtoUtils.NestedIdentityProtoUtils.toProto(nestedIdentity); + + // Verify the result + assertNotNull("NestedIdentity should not be null", protoNestedIdentity); + assertEquals("Field should match", "parent_field", protoNestedIdentity.getField()); + assertEquals("Offset should match", 5, protoNestedIdentity.getOffset()); + assertFalse("Nested field should not be set", protoNestedIdentity.hasNested()); + } + + public void testToProtoWithNestedNestedIdentity() throws Exception { + // Create a nested SearchHit.NestedIdentity + SearchHit.NestedIdentity childNestedIdentity = new SearchHit.NestedIdentity("child_field", 2, null); + SearchHit.NestedIdentity parentNestedIdentity = new SearchHit.NestedIdentity("parent_field", 5, childNestedIdentity); + + // Call the method under test + NestedIdentity protoNestedIdentity = SearchHitProtoUtils.NestedIdentityProtoUtils.toProto(parentNestedIdentity); + + // Verify the result + assertNotNull("NestedIdentity should not be null", protoNestedIdentity); + assertEquals("Field should match", "parent_field", protoNestedIdentity.getField()); + assertEquals("Offset should match", 5, protoNestedIdentity.getOffset()); + assertTrue("Nested field should be set", protoNestedIdentity.hasNested()); + + // Verify the nested identity + NestedIdentity nestedProtoNestedIdentity = protoNestedIdentity.getNested(); + assertNotNull("Nested NestedIdentity should not be null", nestedProtoNestedIdentity); + assertEquals("Nested field should match", "child_field", nestedProtoNestedIdentity.getField()); + assertEquals("Nested offset should match", 2, nestedProtoNestedIdentity.getOffset()); + assertFalse("Nested nested field should not be set", nestedProtoNestedIdentity.hasNested()); + } + + public void testToProtoWithDeeplyNestedNestedIdentity() throws Exception { + // Create a deeply nested SearchHit.NestedIdentity + SearchHit.NestedIdentity grandchildNestedIdentity = new SearchHit.NestedIdentity("grandchild_field", 1, null); + SearchHit.NestedIdentity childNestedIdentity = new SearchHit.NestedIdentity("child_field", 2, grandchildNestedIdentity); + SearchHit.NestedIdentity parentNestedIdentity = new SearchHit.NestedIdentity("parent_field", 5, childNestedIdentity); + + // Call the method under test + NestedIdentity protoNestedIdentity = SearchHitProtoUtils.NestedIdentityProtoUtils.toProto(parentNestedIdentity); + + // Verify the result + assertNotNull("NestedIdentity should not be null", protoNestedIdentity); + assertEquals("Field should match", "parent_field", protoNestedIdentity.getField()); + assertEquals("Offset should match", 5, protoNestedIdentity.getOffset()); + assertTrue("Nested field should be set", protoNestedIdentity.hasNested()); + + // Verify the child nested identity + NestedIdentity childProtoNestedIdentity = protoNestedIdentity.getNested(); + assertNotNull("Child NestedIdentity should not be null", childProtoNestedIdentity); + assertEquals("Child field should match", 
"child_field", childProtoNestedIdentity.getField()); + assertEquals("Child offset should match", 2, childProtoNestedIdentity.getOffset()); + assertTrue("Child nested field should be set", childProtoNestedIdentity.hasNested()); + + // Verify the grandchild nested identity + NestedIdentity grandchildProtoNestedIdentity = childProtoNestedIdentity.getNested(); + assertNotNull("Grandchild NestedIdentity should not be null", grandchildProtoNestedIdentity); + assertEquals("Grandchild field should match", "grandchild_field", grandchildProtoNestedIdentity.getField()); + assertEquals("Grandchild offset should match", 1, grandchildProtoNestedIdentity.getOffset()); + assertFalse("Grandchild nested field should not be set", grandchildProtoNestedIdentity.hasNested()); + } + + public void testToProtoWithNegativeOffset() throws Exception { + // Create a SearchHit.NestedIdentity with negative offset + SearchHit.NestedIdentity nestedIdentity = new SearchHit.NestedIdentity("field", -1, null); + + // Call the method under test + NestedIdentity protoNestedIdentity = SearchHitProtoUtils.NestedIdentityProtoUtils.toProto(nestedIdentity); + + // Verify the result + assertNotNull("NestedIdentity should not be null", protoNestedIdentity); + assertEquals("Field should match", "field", protoNestedIdentity.getField()); + assertEquals("Offset should not be set", 0, protoNestedIdentity.getOffset()); + assertFalse("Nested field should not be set", protoNestedIdentity.hasNested()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtilsTests.java new file mode 100644 index 0000000000000..117cd12cdc675 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtilsTests.java @@ -0,0 +1,253 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.TotalHits; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.text.Text; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.protobufs.Hit; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.search.fetch.subphase.highlight.HighlightField; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.apache.lucene.search.TotalHits.Relation.EQUAL_TO; + +public class SearchHitProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithBasicFields() throws IOException { + // Create a SearchHit with basic fields + SearchHit searchHit = new SearchHit(1, "test_id", null, null); + searchHit.score(2.0f); + searchHit.shard(new SearchShardTarget("test_node", new ShardId("test_index", "_na_", 0), null, null)); + searchHit.version(3); + searchHit.setSeqNo(4); + searchHit.setPrimaryTerm(5); + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertEquals("Index should match", "test_index", hit.getIndex()); + assertEquals("ID should match", "test_id", hit.getId()); + assertEquals("Version should match", 3, hit.getVersion()); + assertEquals("SeqNo should match", 4, hit.getSeqNo()); + assertEquals("PrimaryTerm should match", 5, hit.getPrimaryTerm()); + assertEquals("Score should match", 2.0f, hit.getScore().getFloatValue(), 0.0f); + } + + public void testToProtoWithNullScore() throws IOException { + // Create a SearchHit with NaN score + SearchHit searchHit = new SearchHit(1); + searchHit.score(Float.NaN); + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertTrue("Score should be null", hit.getScore().hasNullValue()); + } + + public void testToProtoWithSource() throws IOException { + // Create a SearchHit with source + SearchHit searchHit = new SearchHit(1); + byte[] sourceBytes = "{\"field\":\"value\"}".getBytes(StandardCharsets.UTF_8); + searchHit.sourceRef(new BytesArray(sourceBytes)); + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertTrue("Source should not be empty", hit.getSource().size() > 0); + assertArrayEquals("Source bytes should match", sourceBytes, hit.getSource().toByteArray()); + } + + public void testToProtoWithClusterAlias() throws IOException { + // Create a SearchHit with cluster alias + SearchHit searchHit = new SearchHit(1); + searchHit.shard(new SearchShardTarget("test_node", new ShardId("test_index", "_na_", 0), "test_cluster", null)); + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertEquals("Index with cluster alias should match", "test_cluster:test_index", hit.getIndex()); + } + + public void 
testToProtoWithUnassignedSeqNo() throws IOException { + // Create a SearchHit with unassigned seqNo + SearchHit searchHit = new SearchHit(1); + searchHit.setSeqNo(SequenceNumbers.UNASSIGNED_SEQ_NO); + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertFalse("SeqNo should not be set", hit.hasSeqNo()); + assertFalse("PrimaryTerm should not be set", hit.hasPrimaryTerm()); + } + + public void testToProtoWithNullFields() throws IOException { + // Create a SearchHit with null fields + SearchHit searchHit = new SearchHit(1); + // Don't set any fields + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertEquals("Index should not be set", "", hit.getIndex()); + assertEquals("ID should not be set", "", hit.getId()); + assertFalse("Version should not be set", hit.hasVersion()); + assertFalse("SeqNo should not be set", hit.hasSeqNo()); + assertFalse("PrimaryTerm should not be set", hit.hasPrimaryTerm()); + assertFalse("Source should not be set", hit.hasSource()); + } + + public void testToProtoWithDocumentFields() throws IOException { + // Create a SearchHit with document fields + SearchHit searchHit = new SearchHit(1); + + // Add document fields + List fieldValues = new ArrayList<>(); + fieldValues.add("value1"); + fieldValues.add("value2"); + searchHit.setDocumentField("field1", new DocumentField("field1", fieldValues)); + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertTrue("Fields should be set", hit.hasFields()); + assertTrue("Field1 should exist", hit.getFields().containsFields("field1")); + assertEquals("Field1 should have 2 values", 2, hit.getFields().getFieldsOrThrow("field1").getListValue().getValueCount()); + assertEquals( + "First value should match", + "value1", + hit.getFields().getFieldsOrThrow("field1").getListValue().getValue(0).getString() + ); + assertEquals( + "Second value should match", + "value2", + hit.getFields().getFieldsOrThrow("field1").getListValue().getValue(1).getString() + ); + } + + public void testToProtoWithHighlightFields() throws IOException { + // Create a SearchHit with highlight fields + SearchHit searchHit = new SearchHit(1); + + // Add highlight fields + Map highlightFields = new HashMap<>(); + Text[] fragments = new Text[] { new Text("highlighted text") }; + highlightFields.put("field1", new HighlightField("field1", fragments)); + searchHit.highlightFields(highlightFields); + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertEquals("Should have 1 highlight field", 1, hit.getHighlightCount()); + assertTrue("Highlight field1 should exist", hit.containsHighlight("field1")); + assertEquals("Highlight field1 should have 1 fragment", 1, hit.getHighlightOrThrow("field1").getStringArrayCount()); + assertEquals("Highlight fragment should match", "highlighted text", hit.getHighlightOrThrow("field1").getStringArray(0)); + } + + public void testToProtoWithMatchedQueries() throws IOException { + // Create a SearchHit with matched queries + SearchHit searchHit = new SearchHit(1); + + // Add matched queries + searchHit.matchedQueries(new String[] { "query1", "query2" }); + + // Call the method under test + Hit hit = 
SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertEquals("Should have 2 matched queries", 2, hit.getMatchedQueriesCount()); + assertEquals("First matched query should match", "query1", hit.getMatchedQueries(0)); + assertEquals("Second matched query should match", "query2", hit.getMatchedQueries(1)); + } + + public void testToProtoWithExplanation() throws IOException { + // Create a SearchHit with explanation + SearchHit searchHit = new SearchHit(1); + searchHit.shard(new SearchShardTarget("test_node", new ShardId("test_index", "_na_", 0), null, null)); + + // Add explanation + Explanation explanation = Explanation.match(1.0f, "explanation"); + searchHit.explanation(explanation); + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertTrue("Explanation should be set", hit.hasExplanation()); + assertEquals("Explanation value should match", 1.0, hit.getExplanation().getValue(), 0.0); + assertEquals("Explanation description should match", "explanation", hit.getExplanation().getDescription()); + } + + public void testToProtoWithInnerHits() throws IOException { + // Create a SearchHit with inner hits + SearchHit searchHit = new SearchHit(1); + + // Add inner hits + Map innerHits = new HashMap<>(); + SearchHit[] innerHitsArray = new SearchHit[] { new SearchHit(2, "inner_id", null, null) }; + innerHits.put("inner_hit", new SearchHits(innerHitsArray, new TotalHits(1, EQUAL_TO), 1.0f)); + searchHit.setInnerHits(innerHits); + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertEquals("Should have 1 inner hit", 1, hit.getInnerHitsCount()); + assertTrue("Inner hit should exist", hit.containsInnerHits("inner_hit")); + assertEquals("Inner hit should have 1 hit", 1, hit.getInnerHitsOrThrow("inner_hit").getHits().getHitsCount()); + assertEquals("Inner hit ID should match", "inner_id", hit.getInnerHitsOrThrow("inner_hit").getHits().getHits(0).getId()); + } + + public void testToProtoWithNestedIdentity() throws Exception { + // Create a SearchHit with nested identity + SearchHit.NestedIdentity nestedIdentity = new SearchHit.NestedIdentity("parent_field", 5, null); + SearchHit searchHit = new SearchHit(1, "1", nestedIdentity, null, null); + + // Call the method under test + Hit hit = SearchHitProtoUtils.toProto(searchHit); + + // Verify the result + assertNotNull("Hit should not be null", hit); + assertTrue("Nested identity should be set", hit.hasNested()); + assertEquals("Nested field should match", "parent_field", hit.getNested().getField()); + assertEquals("Nested offset should match", 5, hit.getNested().getOffset()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitsProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitsProtoUtilsTests.java new file mode 100644 index 0000000000000..97e2e0e4768f0 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitsProtoUtilsTests.java @@ -0,0 +1,138 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.protobufs.HitsMetadata; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class SearchHitsProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithBasicFields() throws IOException { + // Create SearchHits with basic fields + SearchHit[] hits = new SearchHit[2]; + hits[0] = new SearchHit(1, "test_id_1", null, null); + hits[0].score(2.0f); + ShardId shardId = new ShardId("test_index_1", "_na_", 1); + SearchShardTarget searchShardTarget = new SearchShardTarget("test_node", shardId, null, null); + hits[0].shard(searchShardTarget); + + hits[1] = new SearchHit(2, "test_id_2", null, null); + hits[1].score(3.0f); + ShardId shardId2 = new ShardId("test_index_2", "_na_", 1); + SearchShardTarget searchShardTarget2 = new SearchShardTarget("test_node", shardId2, null, null); + hits[1].shard(searchShardTarget2); + + TotalHits totalHits = new TotalHits(10, TotalHits.Relation.EQUAL_TO); + SearchHits searchHits = new SearchHits(hits, totalHits, 3.0f); + + // Call the method under test + HitsMetadata hitsMetadata = SearchHitsProtoUtils.toProto(searchHits); + + // Verify the result + assertNotNull("HitsMetadata should not be null", hitsMetadata); + assertEquals("Total hits value should match", 10, hitsMetadata.getTotal().getTotalHits().getValue()); + assertEquals( + "Total hits relation should be EQUAL_TO", + org.opensearch.protobufs.TotalHits.TotalHitsRelation.TOTAL_HITS_RELATION_EQ, + hitsMetadata.getTotal().getTotalHits().getRelation() + ); + assertEquals("Max score should match", 3.0f, hitsMetadata.getMaxScore().getFloatValue(), 0.0f); + assertEquals("Hits count should match", 2, hitsMetadata.getHitsCount()); + assertEquals("First hit ID should match", "test_id_1", hitsMetadata.getHits(0).getId()); + assertEquals("Second hit ID should match", "test_id_2", hitsMetadata.getHits(1).getId()); + } + + public void testToProtoWithNullTotalHits() throws IOException { + // Create SearchHits with null totalHits + SearchHit[] hits = new SearchHit[1]; + hits[0] = new SearchHit(1, "test_id", null, null); + hits[0].score(2.0f); + ShardId shardId = new ShardId("test_index", "_na_", 1); + SearchShardTarget searchShardTarget = new SearchShardTarget("test_node", shardId, null, null); + hits[0].shard(searchShardTarget); + + SearchHits searchHits = new SearchHits(hits, null, 2.0f); + + // Call the method under test + HitsMetadata hitsMetadata = SearchHitsProtoUtils.toProto(searchHits); + + // Verify the result + assertNotNull("HitsMetadata should not be null", hitsMetadata); + assertFalse("Total hits should not have value", hitsMetadata.getTotal().hasTotalHits()); + assertEquals("Max score should match", 2.0f, hitsMetadata.getMaxScore().getFloatValue(), 0.0f); + assertEquals("Hits count should match", 1, hitsMetadata.getHitsCount()); + } + + public void testToProtoWithGreaterThanRelation() throws IOException { + // Create SearchHits with GREATER_THAN_OR_EQUAL_TO relation + SearchHit[] hits = new SearchHit[1]; + hits[0] = new SearchHit(1, "test_id", null, null); + hits[0].score(2.0f); + ShardId shardId = new ShardId("test_index", "_na_", 1); + SearchShardTarget searchShardTarget = new SearchShardTarget("test_node", shardId, null, null); + 
hits[0].shard(searchShardTarget); + + TotalHits totalHits = new TotalHits(10, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + SearchHits searchHits = new SearchHits(hits, totalHits, 2.0f); + + // Call the method under test + HitsMetadata hitsMetadata = SearchHitsProtoUtils.toProto(searchHits); + + // Verify the result + assertNotNull("HitsMetadata should not be null", hitsMetadata); + assertEquals("Total hits value should match", 10, hitsMetadata.getTotal().getTotalHits().getValue()); + assertEquals( + "Total hits relation should be GREATER_THAN_OR_EQUAL_TO", + org.opensearch.protobufs.TotalHits.TotalHitsRelation.TOTAL_HITS_RELATION_GTE, + hitsMetadata.getTotal().getTotalHits().getRelation() + ); + } + + public void testToProtoWithNaNMaxScore() throws IOException { + // Create SearchHits with NaN maxScore + SearchHit[] hits = new SearchHit[1]; + hits[0] = new SearchHit(1, "test_id", null, null); + hits[0].score(2.0f); + ShardId shardId = new ShardId("test_index", "_na_", 1); + SearchShardTarget searchShardTarget = new SearchShardTarget("test_node", shardId, null, null); + hits[0].shard(searchShardTarget); + + TotalHits totalHits = new TotalHits(10, TotalHits.Relation.EQUAL_TO); + SearchHits searchHits = new SearchHits(hits, totalHits, Float.NaN); + + // Call the method under test + HitsMetadata hitsMetadata = SearchHitsProtoUtils.toProto(searchHits); + + // Verify the result + assertNotNull("HitsMetadata should not be null", hitsMetadata); + assertTrue("Max score should be null", hitsMetadata.getMaxScore().hasNullValue()); + } + + public void testToProtoWithEmptyHits() throws IOException { + // Create SearchHits with empty hits array + SearchHit[] hits = new SearchHit[0]; + TotalHits totalHits = new TotalHits(0, TotalHits.Relation.EQUAL_TO); + SearchHits searchHits = new SearchHits(hits, totalHits, 0.0f); + + // Call the method under test + HitsMetadata hitsMetadata = SearchHitsProtoUtils.toProto(searchHits); + + // Verify the result + assertNotNull("HitsMetadata should not be null", hitsMetadata); + assertEquals("Total hits value should match", 0, hitsMetadata.getTotal().getTotalHits().getValue()); + assertEquals("Hits count should be 0", 0, hitsMetadata.getHitsCount()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseProtoUtilsTests.java new file mode 100644 index 0000000000000..3b02d9600dbe6 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchResponseProtoUtilsTests.java @@ -0,0 +1,282 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.response.search; + +import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchResponseSections; +import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.protobufs.PhaseTook; +import org.opensearch.search.SearchHits; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchResponseProtoUtilsTests extends OpenSearchTestCase { + + public void testToProtoWithBasicResponse() throws IOException { + // Create a mock SearchResponse + SearchResponse mockResponse = mock(SearchResponse.class); + when(mockResponse.getTook()).thenReturn(TimeValue.timeValueMillis(100)); + when(mockResponse.isTimedOut()).thenReturn(false); + when(mockResponse.getTotalShards()).thenReturn(5); + when(mockResponse.getSuccessfulShards()).thenReturn(5); + when(mockResponse.getSkippedShards()).thenReturn(0); + when(mockResponse.getFailedShards()).thenReturn(0); + when(mockResponse.getShardFailures()).thenReturn(new ShardSearchFailure[0]); + when(mockResponse.getClusters()).thenReturn(new SearchResponse.Clusters(0, 0, 0)); + when(mockResponse.getHits()).thenReturn(SearchHits.empty()); + when(mockResponse.getInternalResponse()).thenReturn(mock(SearchResponseSections.class)); + + // Call the method under test + org.opensearch.protobufs.SearchResponse protoResponse = SearchResponseProtoUtils.toProto(mockResponse); + + // Verify the result + assertNotNull("Proto response should not be null", protoResponse); + assertEquals("Took should match", 100, protoResponse.getResponseBody().getTook()); + assertFalse("Timed out should be false", protoResponse.getResponseBody().getTimedOut()); + assertEquals("Total shards should match", 5, protoResponse.getResponseBody().getShards().getTotal()); + assertEquals("Successful shards should match", 5, protoResponse.getResponseBody().getShards().getSuccessful()); + assertEquals("Skipped shards should match", 0, protoResponse.getResponseBody().getShards().getSkipped()); + assertEquals("Failed shards should match", 0, protoResponse.getResponseBody().getShards().getFailed()); + } + + public void testToProtoWithScrollId() throws IOException { + // Create a mock SearchResponse with scroll ID + SearchResponse mockResponse = mock(SearchResponse.class); + when(mockResponse.getTook()).thenReturn(TimeValue.timeValueMillis(100)); + when(mockResponse.isTimedOut()).thenReturn(false); + when(mockResponse.getTotalShards()).thenReturn(5); + when(mockResponse.getSuccessfulShards()).thenReturn(5); + when(mockResponse.getSkippedShards()).thenReturn(0); + when(mockResponse.getFailedShards()).thenReturn(0); + when(mockResponse.getShardFailures()).thenReturn(new ShardSearchFailure[0]); + when(mockResponse.getClusters()).thenReturn(new SearchResponse.Clusters(0, 0, 0)); + when(mockResponse.getScrollId()).thenReturn("test_scroll_id"); + when(mockResponse.getHits()).thenReturn(SearchHits.empty()); + when(mockResponse.getInternalResponse()).thenReturn(mock(SearchResponseSections.class)); + + // Call the method under test + org.opensearch.protobufs.SearchResponse protoResponse = SearchResponseProtoUtils.toProto(mockResponse); + + // Verify the result + assertNotNull("Proto response should not be null", protoResponse); + 
assertEquals("Scroll ID should match", "test_scroll_id", protoResponse.getResponseBody().getScrollId()); + } + + public void testToProtoWithPointInTimeId() throws IOException { + // Create a mock SearchResponse with point in time ID + SearchResponse mockResponse = mock(SearchResponse.class); + when(mockResponse.getTook()).thenReturn(TimeValue.timeValueMillis(100)); + when(mockResponse.isTimedOut()).thenReturn(false); + when(mockResponse.getTotalShards()).thenReturn(5); + when(mockResponse.getSuccessfulShards()).thenReturn(5); + when(mockResponse.getSkippedShards()).thenReturn(0); + when(mockResponse.getFailedShards()).thenReturn(0); + when(mockResponse.getShardFailures()).thenReturn(new ShardSearchFailure[0]); + when(mockResponse.getClusters()).thenReturn(new SearchResponse.Clusters(0, 0, 0)); + when(mockResponse.pointInTimeId()).thenReturn("test_pit_id"); + when(mockResponse.getHits()).thenReturn(SearchHits.empty()); + when(mockResponse.getInternalResponse()).thenReturn(mock(SearchResponseSections.class)); + + // Call the method under test + org.opensearch.protobufs.SearchResponse protoResponse = SearchResponseProtoUtils.toProto(mockResponse); + + // Verify the result + assertNotNull("Proto response should not be null", protoResponse); + assertEquals("Point in time ID should match", "test_pit_id", protoResponse.getResponseBody().getPitId()); + } + + public void testToProtoWithPhaseTook() throws IOException { + // Create a mock SearchResponse.PhaseTook + Map phaseTookMap = new HashMap<>(); + phaseTookMap.put(SearchPhaseName.QUERY.getName(), 50L); + phaseTookMap.put(SearchPhaseName.FETCH.getName(), 30L); + phaseTookMap.put(SearchPhaseName.DFS_QUERY.getName(), 20L); + phaseTookMap.put(SearchPhaseName.DFS_PRE_QUERY.getName(), 10L); + phaseTookMap.put(SearchPhaseName.EXPAND.getName(), 5L); + phaseTookMap.put(SearchPhaseName.CAN_MATCH.getName(), 5L); + + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); + + // Create a mock SearchResponse with phase took + SearchResponse mockResponse = mock(SearchResponse.class); + when(mockResponse.getTook()).thenReturn(TimeValue.timeValueMillis(100)); + when(mockResponse.isTimedOut()).thenReturn(false); + when(mockResponse.getTotalShards()).thenReturn(5); + when(mockResponse.getSuccessfulShards()).thenReturn(5); + when(mockResponse.getSkippedShards()).thenReturn(0); + when(mockResponse.getFailedShards()).thenReturn(0); + when(mockResponse.getShardFailures()).thenReturn(new ShardSearchFailure[0]); + when(mockResponse.getClusters()).thenReturn(new SearchResponse.Clusters(0, 0, 0)); + when(mockResponse.getPhaseTook()).thenReturn(phaseTook); + when(mockResponse.getHits()).thenReturn(SearchHits.empty()); + when(mockResponse.getInternalResponse()).thenReturn(mock(SearchResponseSections.class)); + + // Call the method under test + org.opensearch.protobufs.SearchResponse protoResponse = SearchResponseProtoUtils.toProto(mockResponse); + + // Verify the result + assertNotNull("Proto response should not be null", protoResponse); + assertTrue("Phase took should be present", protoResponse.getResponseBody().hasPhaseTook()); + assertEquals("Query phase took should match", 50L, protoResponse.getResponseBody().getPhaseTook().getQuery()); + assertEquals("Fetch phase took should match", 30L, protoResponse.getResponseBody().getPhaseTook().getFetch()); + assertEquals("DFS query phase took should match", 20L, protoResponse.getResponseBody().getPhaseTook().getDfsQuery()); + assertEquals("DFS pre-query phase took should match", 10L, 
protoResponse.getResponseBody().getPhaseTook().getDfsPreQuery()); + assertEquals("Expand phase took should match", 5L, protoResponse.getResponseBody().getPhaseTook().getExpand()); + assertEquals("Can match phase took should match", 5L, protoResponse.getResponseBody().getPhaseTook().getCanMatch()); + } + + public void testToProtoWithTerminatedEarly() throws IOException { + // Create a mock SearchResponse with terminated early + SearchResponse mockResponse = mock(SearchResponse.class); + when(mockResponse.getTook()).thenReturn(TimeValue.timeValueMillis(100)); + when(mockResponse.isTimedOut()).thenReturn(false); + when(mockResponse.getTotalShards()).thenReturn(5); + when(mockResponse.getSuccessfulShards()).thenReturn(5); + when(mockResponse.getSkippedShards()).thenReturn(0); + when(mockResponse.getFailedShards()).thenReturn(0); + when(mockResponse.getShardFailures()).thenReturn(new ShardSearchFailure[0]); + when(mockResponse.getClusters()).thenReturn(new SearchResponse.Clusters(0, 0, 0)); + when(mockResponse.isTerminatedEarly()).thenReturn(true); + when(mockResponse.getHits()).thenReturn(SearchHits.empty()); + when(mockResponse.getInternalResponse()).thenReturn(mock(SearchResponseSections.class)); + + // Call the method under test + org.opensearch.protobufs.SearchResponse protoResponse = SearchResponseProtoUtils.toProto(mockResponse); + + // Verify the result + assertNotNull("Proto response should not be null", protoResponse); + assertTrue("Terminated early should be true", protoResponse.getResponseBody().getTerminatedEarly()); + } + + public void testToProtoWithNumReducePhases() throws IOException { + // Create a mock SearchResponse with num reduce phases + SearchResponse mockResponse = mock(SearchResponse.class); + when(mockResponse.getTook()).thenReturn(TimeValue.timeValueMillis(100)); + when(mockResponse.isTimedOut()).thenReturn(false); + when(mockResponse.getTotalShards()).thenReturn(5); + when(mockResponse.getSuccessfulShards()).thenReturn(5); + when(mockResponse.getSkippedShards()).thenReturn(0); + when(mockResponse.getFailedShards()).thenReturn(0); + when(mockResponse.getShardFailures()).thenReturn(new ShardSearchFailure[0]); + when(mockResponse.getClusters()).thenReturn(new SearchResponse.Clusters(0, 0, 0)); + when(mockResponse.getNumReducePhases()).thenReturn(3); + when(mockResponse.getHits()).thenReturn(SearchHits.empty()); + when(mockResponse.getInternalResponse()).thenReturn(mock(SearchResponseSections.class)); + + // Call the method under test + org.opensearch.protobufs.SearchResponse protoResponse = SearchResponseProtoUtils.toProto(mockResponse); + + // Verify the result + assertNotNull("Proto response should not be null", protoResponse); + assertEquals("Num reduce phases should match", 3, protoResponse.getResponseBody().getNumReducePhases()); + } + + public void testToProtoWithClusters() throws IOException { + // Create a mock SearchResponse with clusters + SearchResponse mockResponse = mock(SearchResponse.class); + when(mockResponse.getTook()).thenReturn(TimeValue.timeValueMillis(100)); + when(mockResponse.isTimedOut()).thenReturn(false); + when(mockResponse.getTotalShards()).thenReturn(5); + when(mockResponse.getSuccessfulShards()).thenReturn(5); + when(mockResponse.getSkippedShards()).thenReturn(0); + when(mockResponse.getFailedShards()).thenReturn(0); + when(mockResponse.getShardFailures()).thenReturn(new ShardSearchFailure[0]); + when(mockResponse.getClusters()).thenReturn(new SearchResponse.Clusters(3, 2, 1)); + 
when(mockResponse.getHits()).thenReturn(SearchHits.empty()); + when(mockResponse.getInternalResponse()).thenReturn(mock(SearchResponseSections.class)); + + // Call the method under test + org.opensearch.protobufs.SearchResponse protoResponse = SearchResponseProtoUtils.toProto(mockResponse); + + // Verify the result + assertNotNull("Proto response should not be null", protoResponse); + assertTrue("Clusters should be present", protoResponse.getResponseBody().hasClusters()); + assertEquals("Total clusters should match", 3, protoResponse.getResponseBody().getClusters().getTotal()); + assertEquals("Successful clusters should match", 2, protoResponse.getResponseBody().getClusters().getSuccessful()); + assertEquals("Skipped clusters should match", 1, protoResponse.getResponseBody().getClusters().getSkipped()); + } + + public void testPhaseTookProtoUtilsToProto() { + // Create a mock SearchResponse.PhaseTook + Map phaseTookMap = new HashMap<>(); + phaseTookMap.put(SearchPhaseName.QUERY.getName(), 50L); + phaseTookMap.put(SearchPhaseName.FETCH.getName(), 30L); + phaseTookMap.put(SearchPhaseName.DFS_QUERY.getName(), 20L); + phaseTookMap.put(SearchPhaseName.DFS_PRE_QUERY.getName(), 10L); + phaseTookMap.put(SearchPhaseName.EXPAND.getName(), 5L); + phaseTookMap.put(SearchPhaseName.CAN_MATCH.getName(), 5L); + + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); + + // Call the method under test + PhaseTook protoPhaseTook = SearchResponseProtoUtils.PhaseTookProtoUtils.toProto(phaseTook); + + // Verify the result + assertNotNull("Proto phase took should not be null", protoPhaseTook); + assertEquals("Query phase took should match", 50L, protoPhaseTook.getQuery()); + assertEquals("Fetch phase took should match", 30L, protoPhaseTook.getFetch()); + assertEquals("DFS query phase took should match", 20L, protoPhaseTook.getDfsQuery()); + assertEquals("DFS pre-query phase took should match", 10L, protoPhaseTook.getDfsPreQuery()); + assertEquals("Expand phase took should match", 5L, protoPhaseTook.getExpand()); + assertEquals("Can match phase took should match", 5L, protoPhaseTook.getCanMatch()); + } + + public void testPhaseTookProtoUtilsToProtoWithNullPhaseTook() { + // Call the method under test with null + PhaseTook protoPhaseTook = SearchResponseProtoUtils.PhaseTookProtoUtils.toProto(null); + + // Verify the result + assertNotNull("Proto phase took should not be null", protoPhaseTook); + assertEquals("Query phase took should be 0", 0L, protoPhaseTook.getQuery()); + assertEquals("Fetch phase took should be 0", 0L, protoPhaseTook.getFetch()); + assertEquals("DFS query phase took should be 0", 0L, protoPhaseTook.getDfsQuery()); + assertEquals("DFS pre-query phase took should be 0", 0L, protoPhaseTook.getDfsPreQuery()); + assertEquals("Expand phase took should be 0", 0L, protoPhaseTook.getExpand()); + assertEquals("Can match phase took should be 0", 0L, protoPhaseTook.getCanMatch()); + } + + public void testClustersProtoUtilsToProtoWithNonZeroClusters() throws IOException { + // Create a mock SearchResponse.Clusters + SearchResponse.Clusters clusters = new SearchResponse.Clusters(3, 2, 1); + + // Create a builder to populate + org.opensearch.protobufs.ResponseBody.Builder builder = org.opensearch.protobufs.ResponseBody.newBuilder(); + + // Call the method under test + SearchResponseProtoUtils.ClustersProtoUtils.toProto(builder, clusters); + + // Verify the result + assertTrue("Clusters should be present", builder.hasClusters()); + assertEquals("Total clusters should match", 3, 
builder.getClusters().getTotal()); + assertEquals("Successful clusters should match", 2, builder.getClusters().getSuccessful()); + assertEquals("Skipped clusters should match", 1, builder.getClusters().getSkipped()); + } + + public void testClustersProtoUtilsToProtoWithZeroClusters() throws IOException { + // Create a mock SearchResponse.Clusters with zero total + SearchResponse.Clusters clusters = new SearchResponse.Clusters(0, 0, 0); + + // Create a builder to populate + org.opensearch.protobufs.ResponseBody.Builder builder = org.opensearch.protobufs.ResponseBody.newBuilder(); + + // Call the method under test + SearchResponseProtoUtils.ClustersProtoUtils.toProto(builder, clusters); + + // Verify the result + assertFalse("Clusters should not be present", builder.hasClusters()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/SearchServiceImplTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/SearchServiceImplTests.java new file mode 100644 index 0000000000000..fddb00da495ce --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/services/SearchServiceImplTests.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.services; + +import org.opensearch.protobufs.SearchRequest; +import org.opensearch.protobufs.SearchRequestBody; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.client.node.NodeClient; +import org.junit.Before; + +import java.io.IOException; + +import io.grpc.stub.StreamObserver; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.verify; + +public class SearchServiceImplTests extends OpenSearchTestCase { + + private SearchServiceImpl service; + + @Mock + private NodeClient client; + + @Mock + private StreamObserver responseObserver; + + @Before + public void setup() throws IOException { + MockitoAnnotations.openMocks(this); + service = new SearchServiceImpl(client); + } + + public void testSearchSuccess() throws IOException { + // Create a test request + SearchRequest request = createTestSearchRequest(); + + // Call the search method + service.search(request, responseObserver); + + // Verify that client.search was called with any SearchRequest and any ActionListener + verify(client).search(any(org.opensearch.action.search.SearchRequest.class), any()); + } + + public void testSearchError() throws IOException { + // Create a test request + SearchRequest request = createTestSearchRequest(); + + // Make the client throw an exception when search is called + doThrow(new RuntimeException("Test exception")).when(client).search(any(org.opensearch.action.search.SearchRequest.class), any()); + + // Call the search method + service.search(request, responseObserver); + + // Verify that the error was sent + verify(responseObserver).onError(any(RuntimeException.class)); + } + + private SearchRequest createTestSearchRequest() { + SearchRequestBody requestBody = SearchRequestBody.newBuilder().build(); + + return SearchRequest.newBuilder().setRequestBody(requestBody).build(); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java 
b/server/src/main/java/org/opensearch/action/search/SearchResponse.java index 0d55fbf2e7f88..c9568b4d77791 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java @@ -709,6 +709,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(phaseTookMap, StreamOutput::writeString, StreamOutput::writeLong); } + public Map getPhaseTookMap() { + return phaseTookMap; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(PHASE_TOOK.getPreferredName()); diff --git a/server/src/main/java/org/opensearch/index/query/Operator.java b/server/src/main/java/org/opensearch/index/query/Operator.java index ee8c93ce76ecb..d3add68efc476 100644 --- a/server/src/main/java/org/opensearch/index/query/Operator.java +++ b/server/src/main/java/org/opensearch/index/query/Operator.java @@ -85,7 +85,7 @@ public static Operator fromString(String op) { return valueOf(op.toUpperCase(Locale.ROOT)); } - private static IllegalArgumentException newOperatorException(String op) { + public static IllegalArgumentException newOperatorException(String op) { return new IllegalArgumentException( "operator needs to be either " + CollectionUtils.arrayAsArrayList(values()) + ", but not [" + op + "]" ); diff --git a/server/src/main/java/org/opensearch/search/SearchHit.java b/server/src/main/java/org/opensearch/search/SearchHit.java index 6391353cfe5b1..c88291ced34ef 100644 --- a/server/src/main/java/org/opensearch/search/SearchHit.java +++ b/server/src/main/java/org/opensearch/search/SearchHit.java @@ -469,6 +469,34 @@ public Map getFields() { } } + /** + * A map of hit fields (from field name to hit fields) if additional fields + * were required to be loaded. + */ + public Map getMetaFields() { + if (!metaFields.isEmpty()) { + final Map fields = new HashMap<>(); + fields.putAll(metaFields); + return fields; + } else { + return emptyMap(); + } + } + + /** + * A map of hit fields (from field name to hit fields) if additional fields + * were required to be loaded. + */ + public Map getDocumentFields() { + if (!documentFields.isEmpty()) { + final Map fields = new HashMap<>(); + fields.putAll(documentFields); + return fields; + } else { + return emptyMap(); + } + } + /** * A map of highlighted fields. 
*/ From 155f8926f679390d9dd3caaf35dbc59637e89733 Mon Sep 17 00:00:00 2001 From: expani1729 <110471048+expani@users.noreply.github.com> Date: Wed, 9 Apr 2025 15:32:51 -0700 Subject: [PATCH 184/550] Increased window matching Lucene's BooleanScorer (#17824) Signed-off-by: expani --- .../org/opensearch/search/internal/CancellableBulkScorer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/search/internal/CancellableBulkScorer.java b/server/src/main/java/org/opensearch/search/internal/CancellableBulkScorer.java index 585d0baba8717..1e183a194d598 100644 --- a/server/src/main/java/org/opensearch/search/internal/CancellableBulkScorer.java +++ b/server/src/main/java/org/opensearch/search/internal/CancellableBulkScorer.java @@ -49,7 +49,7 @@ final class CancellableBulkScorer extends BulkScorer { // we use the BooleanScorer window size as a base interval in order to make sure that we do not // slow down boolean queries - private static final int INITIAL_INTERVAL = 1 << 11; + private static final int INITIAL_INTERVAL = 1 << 12; // No point in having intervals that are larger than 1M private static final int MAX_INTERVAL = 1 << 20; From 9035f501114ca1fec8692c20dfce75fe37556051 Mon Sep 17 00:00:00 2001 From: Yupeng Fu Date: Wed, 9 Apr 2025 15:50:11 -0700 Subject: [PATCH 185/550] [Pull-based ingestion] make maxPollSize and pollTimeout in IngestionSource configurable (#17863) * make maxPollSize and pollTimeout in IngestionSource configurable Signed-off-by: Yupeng Fu * changelog Signed-off-by: Yupeng Fu * comment Signed-off-by: Yupeng Fu --------- Signed-off-by: Yupeng Fu --- CHANGELOG.md | 1 + .../cluster/metadata/IndexMetadata.java | 28 ++++++++++++ .../cluster/metadata/IngestionSource.java | 44 +++++++++++++++++-- .../index/engine/IngestionEngine.java | 4 +- .../pollingingest/DefaultStreamPoller.java | 5 ++- .../metadata/IngestionSourceTests.java | 12 ++++- 6 files changed, 86 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c764c495051b6..28ec3f370d0ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Disable the index API for ingestion engine ([#17768](https://github.com/opensearch-project/OpenSearch/pull/17768)) - Add SearchService and Search GRPC endpoint ([#17830](https://github.com/opensearch-project/OpenSearch/pull/17830)) - Add update and delete support in pull-based ingestion ([#17822](https://github.com/opensearch-project/OpenSearch/pull/17822)) +- Allow maxPollSize and pollTimeout in IngestionSource to be configurable ([#17863](https://github.com/opensearch-project/OpenSearch/pull/17863)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 04b10a43ffa10..f32389ba1c951 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -807,6 +807,30 @@ public Iterator> settings() { Property.Dynamic ); + /** + * Defines the max poll size per batch for pull-based ingestion. 
+ */ + public static final String SETTING_INGESTION_SOURCE_MAX_POLL_SIZE = "index.ingestion_source.poll.max_batch_size"; + public static final Setting INGESTION_SOURCE_MAX_POLL_SIZE = Setting.longSetting( + SETTING_INGESTION_SOURCE_MAX_POLL_SIZE, + 1000, + 0, + Property.IndexScope, + Property.Dynamic + ); + + /** + * Defines the poll timeout for pull-based ingestion in milliseconds. + */ + public static final String SETTING_INGESTION_SOURCE_POLL_TIMEOUT = "index.ingestion_source.poll.timeout"; + public static final Setting INGESTION_SOURCE_POLL_TIMEOUT = Setting.intSetting( + SETTING_INGESTION_SOURCE_POLL_TIMEOUT, + 1000, + 0, + Property.IndexScope, + Property.Dynamic + ); + public static final Setting.AffixSetting INGESTION_SOURCE_PARAMS_SETTING = Setting.prefixKeySetting( "index.ingestion_source.param.", key -> new Setting<>(key, "", (value) -> { @@ -1047,9 +1071,13 @@ public IngestionSource getIngestionSource() { final IngestionErrorStrategy.ErrorStrategy errorStrategy = INGESTION_SOURCE_ERROR_STRATEGY_SETTING.get(settings); final Map ingestionSourceParams = INGESTION_SOURCE_PARAMS_SETTING.getAsMap(settings); + final long maxPollSize = INGESTION_SOURCE_MAX_POLL_SIZE.get(settings); + final int pollTimeout = INGESTION_SOURCE_POLL_TIMEOUT.get(settings); return new IngestionSource.Builder(ingestionSourceType).setParams(ingestionSourceParams) .setPointerInitReset(pointerInitReset) .setErrorStrategy(errorStrategy) + .setMaxPollSize(maxPollSize) + .setPollTimeout(pollTimeout) .build(); } return null; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java index b8ffa890ce519..d3c44c1e7027c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java @@ -9,6 +9,7 @@ package org.opensearch.cluster.metadata; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; import org.opensearch.indices.pollingingest.IngestionErrorStrategy; import org.opensearch.indices.pollingingest.StreamPoller; @@ -16,6 +17,9 @@ import java.util.Map; import java.util.Objects; +import static org.opensearch.cluster.metadata.IndexMetadata.INGESTION_SOURCE_MAX_POLL_SIZE; +import static org.opensearch.cluster.metadata.IndexMetadata.INGESTION_SOURCE_POLL_TIMEOUT; + /** * Class encapsulating the configuration of an ingestion source. 
*/ @@ -25,17 +29,23 @@ public class IngestionSource { private final PointerInitReset pointerInitReset; private final IngestionErrorStrategy.ErrorStrategy errorStrategy; private final Map params; + private final long maxPollSize; + private final int pollTimeout; private IngestionSource( String type, PointerInitReset pointerInitReset, IngestionErrorStrategy.ErrorStrategy errorStrategy, - Map params + Map params, + long maxPollSize, + int pollTimeout ) { this.type = type; this.pointerInitReset = pointerInitReset; this.params = params; this.errorStrategy = errorStrategy; + this.maxPollSize = maxPollSize; + this.pollTimeout = pollTimeout; } public String getType() { @@ -54,6 +64,14 @@ public Map params() { return params; } + public long getMaxPollSize() { + return maxPollSize; + } + + public int getPollTimeout() { + return pollTimeout; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -62,12 +80,14 @@ public boolean equals(Object o) { return Objects.equals(type, ingestionSource.type) && Objects.equals(pointerInitReset, ingestionSource.pointerInitReset) && Objects.equals(errorStrategy, ingestionSource.errorStrategy) - && Objects.equals(params, ingestionSource.params); + && Objects.equals(params, ingestionSource.params) + && Objects.equals(maxPollSize, ingestionSource.maxPollSize) + && Objects.equals(pollTimeout, ingestionSource.pollTimeout); } @Override public int hashCode() { - return Objects.hash(type, pointerInitReset, params, errorStrategy); + return Objects.hash(type, pointerInitReset, params, errorStrategy, maxPollSize, pollTimeout); } @Override @@ -84,6 +104,10 @@ public String toString() { + '\'' + ", params=" + params + + ", maxPollSize=" + + maxPollSize + + ", pollTimeout=" + + pollTimeout + '}'; } @@ -137,6 +161,8 @@ public static class Builder { private PointerInitReset pointerInitReset; private IngestionErrorStrategy.ErrorStrategy errorStrategy; private Map params; + private long maxPollSize = INGESTION_SOURCE_MAX_POLL_SIZE.getDefault(Settings.EMPTY); + private int pollTimeout = INGESTION_SOURCE_POLL_TIMEOUT.getDefault(Settings.EMPTY); public Builder(String type) { this.type = type; @@ -165,13 +191,23 @@ public Builder setParams(Map params) { return this; } + public Builder setMaxPollSize(long maxPollSize) { + this.maxPollSize = maxPollSize; + return this; + } + public Builder addParam(String key, Object value) { this.params.put(key, value); return this; } + public Builder setPollTimeout(int pollTimeout) { + this.pollTimeout = pollTimeout; + return this; + } + public IngestionSource build() { - return new IngestionSource(type, pointerInitReset, errorStrategy, params); + return new IngestionSource(type, pointerInitReset, errorStrategy, params, maxPollSize, pollTimeout); } } diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index 6d5f112efe594..bd17ee2170121 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -121,7 +121,9 @@ public void start() { resetState, resetValue, ingestionErrorStrategy, - initialPollerState + initialPollerState, + ingestionSource.getMaxPollSize(), + ingestionSource.getPollTimeout() ); streamPoller.start(); } diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java index 
e1a4f7d3b4b7d..4b4a44e13d1df 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -32,7 +32,6 @@ public class DefaultStreamPoller implements StreamPoller { private static final Logger logger = LogManager.getLogger(DefaultStreamPoller.class); - // TODO: make this configurable public static final long MAX_POLL_SIZE = 1000; public static final int POLL_TIMEOUT = 1000; @@ -77,7 +76,9 @@ public DefaultStreamPoller( ResetState resetState, String resetValue, IngestionErrorStrategy errorStrategy, - State initialState + State initialState, + long maxPollSize, + int pollTimeout ) { this( startPointer, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java index 1e24c5f7df4a0..11a12c5e753f8 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java @@ -36,6 +36,8 @@ public void testConstructorAndGetters() { assertEquals("1000", source.getPointerInitReset().getValue()); assertEquals(DROP, source.getErrorStrategy()); assertEquals(params, source.params()); + assertEquals(1000, source.getMaxPollSize()); + assertEquals(1000, source.getPollTimeout()); } public void testEquals() { @@ -44,6 +46,8 @@ public void testEquals() { IngestionSource source1 = new IngestionSource.Builder("type").setParams(params1) .setPointerInitReset(pointerInitReset) .setErrorStrategy(DROP) + .setMaxPollSize(500) + .setPollTimeout(500) .build(); Map params2 = new HashMap<>(); @@ -51,6 +55,8 @@ public void testEquals() { IngestionSource source2 = new IngestionSource.Builder("type").setParams(params2) .setPointerInitReset(pointerInitReset) .setErrorStrategy(DROP) + .setMaxPollSize(500) + .setPollTimeout(500) .build(); assertTrue(source1.equals(source2)); assertTrue(source2.equals(source1)); @@ -68,6 +74,8 @@ public void testHashCode() { IngestionSource source1 = new IngestionSource.Builder("type").setParams(params1) .setPointerInitReset(pointerInitReset) .setErrorStrategy(DROP) + .setMaxPollSize(500) + .setPollTimeout(500) .build(); Map params2 = new HashMap<>(); @@ -75,6 +83,8 @@ public void testHashCode() { IngestionSource source2 = new IngestionSource.Builder("type").setParams(params2) .setPointerInitReset(pointerInitReset) .setErrorStrategy(DROP) + .setMaxPollSize(500) + .setPollTimeout(500) .build(); assertEquals(source1.hashCode(), source2.hashCode()); @@ -93,7 +103,7 @@ public void testToString() { .setErrorStrategy(DROP) .build(); String expected = - "IngestionSource{type='type',pointer_init_reset='PointerInitReset{type='REWIND_BY_OFFSET', value=1000}',error_strategy='DROP', params={key=value}}"; + "IngestionSource{type='type',pointer_init_reset='PointerInitReset{type='REWIND_BY_OFFSET', value=1000}',error_strategy='DROP', params={key=value}, maxPollSize=1000, pollTimeout=1000}"; assertEquals(expected, source.toString()); } } From 162815265d8e21f6b3ca6a680726b5ce9bd8ddd8 Mon Sep 17 00:00:00 2001 From: Harsha Vamsi Kalluri Date: Wed, 9 Apr 2025 17:30:13 -0700 Subject: [PATCH 186/550] Remove feature flag for ApproximatePointRangeQuery (#17769) Signed-off-by: Harsha Vamsi Kalluri --- CHANGELOG.md | 1 + .../mapper/ScaledFloatFieldTypeTests.java | 5 +- .../opensearch/common/util/FeatureFlags.java | 9 -- .../index/mapper/DateFieldMapper.java | 27 ++-- 
.../index/mapper/NumberFieldMapper.java | 41 +++--- .../opensearch/index/search/NestedHelper.java | 5 + .../ApproximatePointRangeQuery.java | 111 +++++++--------- .../approximate/ApproximateScoreQuery.java | 5 +- .../search/internal/ContextIndexSearcher.java | 6 +- .../index/mapper/DateFieldTypeTests.java | 104 +++++---------- .../index/mapper/NumberFieldTypeTests.java | 31 +++-- ...angeFieldQueryStringQueryBuilderTests.java | 18 +-- .../index/mapper/RangeFieldTypeTests.java | 5 - .../query/QueryStringQueryBuilderTests.java | 18 +-- .../index/query/RangeQueryBuilderTests.java | 63 ++-------- .../ApproximatePointRangeQueryTests.java | 119 ++++++++++-------- .../ApproximateScoreQueryTests.java | 9 +- 17 files changed, 242 insertions(+), 335 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 28ec3f370d0ce..eb19d6d86f7f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed - Remove deprecated `batch_size` parameter from `_bulk` ([#14283](https://github.com/opensearch-project/OpenSearch/issues/14283)) +- Remove `FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY_SETTING` since range query approximation is no longer experimental ([#17769](https://github.com/opensearch-project/OpenSearch/pull/17769)) ### Fixed - Fix bytes parameter on `_cat/recovery` ([#17598](https://github.com/opensearch-project/OpenSearch/pull/17598)) diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java index a653edbd05992..97976d0db0b96 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java @@ -51,6 +51,7 @@ import org.opensearch.index.fielddata.IndexNumericFieldData; import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.SortedNumericDoubleValues; +import org.opensearch.search.approximate.ApproximateScoreQuery; import java.io.IOException; import java.util.Arrays; @@ -167,8 +168,8 @@ public void testRoundsLowerBoundCorrectly() { } private String getQueryString(Query query) { - assertTrue(query instanceof IndexOrDocValuesQuery); - return ((IndexOrDocValuesQuery) query).getIndexQuery().toString(); + assertTrue(query instanceof ApproximateScoreQuery); + return ((IndexOrDocValuesQuery) ((ApproximateScoreQuery) query).getOriginalQuery()).getIndexQuery().toString(); } public void testValueForSearch() { diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 8fa914438c1c4..70461e711679e 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -122,15 +122,6 @@ public class FeatureFlags { Property.NodeScope ); - /** - * Gates the functionality of ApproximatePointRangeQuery where we approximate query results. 
- */ - public static final String APPROXIMATE_POINT_RANGE_QUERY = FEATURE_FLAG_PREFIX + "approximate_point_range_query.enabled"; - public static final Setting APPROXIMATE_POINT_RANGE_QUERY_SETTING = Setting.boolSetting( - APPROXIMATE_POINT_RANGE_QUERY, - false, - Property.NodeScope - ); public static final String TERM_VERSION_PRECOMMIT_ENABLE = OS_EXPERIMENTAL_PREFIX + "optimization.termversion.precommit.enabled"; public static final Setting TERM_VERSION_PRECOMMIT_ENABLE_SETTING = Setting.boolSetting( TERM_VERSION_PRECOMMIT_ENABLE, diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index ec5b128bf3173..3e96f7651aece 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -486,23 +486,16 @@ public Query rangeQuery( } else { query = pointRangeQuery; } - if (FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY_SETTING)) { - return new ApproximateScoreQuery( - query, - new ApproximatePointRangeQuery( - name(), - pack(new long[] { l }).bytes, - pack(new long[] { u }).bytes, - new long[] { l }.length - ) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } - ); - } - return query; + return new ApproximateScoreQuery( + query, + new ApproximatePointRangeQuery( + name(), + pack(new long[] { l }).bytes, + pack(new long[] { u }).bytes, + new long[] { l }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ) + ); } // Not searchable. Must have doc values. diff --git a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java index faf3f1bb654c8..1112734247d0e 100644 --- a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java @@ -69,6 +69,8 @@ import org.opensearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; +import org.opensearch.search.approximate.ApproximatePointRangeQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.search.query.BitmapDocValuesQuery; import org.opensearch.search.query.BitmapIndexQuery; @@ -1058,23 +1060,34 @@ public Query rangeQuery( QueryShardContext context ) { return longRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (l, u) -> { - if (isSearchable && hasDocValues) { - Query query = LongPoint.newRangeQuery(field, l, u); - Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); - query = new IndexOrDocValuesQuery(query, dvQuery); - if (context.indexSortedOnField(field)) { - query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); + Query dvQuery = hasDocValues ? 
SortedNumericDocValuesField.newSlowRangeQuery(field, l, u) : null; + if (isSearchable) { + Query pointRangeQuery = LongPoint.newRangeQuery(field, l, u); + Query query; + if (dvQuery != null) { + query = new IndexOrDocValuesQuery(pointRangeQuery, dvQuery); + if (context.indexSortedOnField(field)) { + query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); + } + } else { + query = pointRangeQuery; } - return query; + return new ApproximateScoreQuery( + query, + new ApproximatePointRangeQuery( + field, + LongPoint.pack(new long[] { l }).bytes, + LongPoint.pack(new long[] { u }).bytes, + new long[] { l }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ) + ); + } - if (hasDocValues) { - Query query = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); - if (context.indexSortedOnField(field)) { - query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); - } - return query; + if (context.indexSortedOnField(field)) { + dvQuery = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, dvQuery); } - return LongPoint.newRangeQuery(field, l, u); + return dvQuery; }); } diff --git a/server/src/main/java/org/opensearch/index/search/NestedHelper.java b/server/src/main/java/org/opensearch/index/search/NestedHelper.java index 33094455b0382..fd8527924083a 100644 --- a/server/src/main/java/org/opensearch/index/search/NestedHelper.java +++ b/server/src/main/java/org/opensearch/index/search/NestedHelper.java @@ -46,6 +46,7 @@ import org.apache.lucene.search.TermQuery; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ObjectMapper; +import org.opensearch.search.approximate.ApproximateScoreQuery; /** Utility class to filter parent and children clauses when building nested * queries. @@ -85,6 +86,8 @@ public boolean mightMatchNestedDocs(Query query) { return mightMatchNestedDocs(((PointRangeQuery) query).getField()); } else if (query instanceof IndexOrDocValuesQuery) { return mightMatchNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery()); + } else if (query instanceof ApproximateScoreQuery) { + return mightMatchNestedDocs(((ApproximateScoreQuery) query).getOriginalQuery()); } else if (query instanceof BooleanQuery) { final BooleanQuery bq = (BooleanQuery) query; final boolean hasRequiredClauses = bq.clauses().stream().anyMatch(BooleanClause::isRequired); @@ -155,6 +158,8 @@ public boolean mightMatchNonNestedDocs(Query query, String nestedPath) { return mightMatchNonNestedDocs(((PointRangeQuery) query).getField(), nestedPath); } else if (query instanceof IndexOrDocValuesQuery) { return mightMatchNonNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery(), nestedPath); + } else if (query instanceof ApproximateScoreQuery) { + return mightMatchNonNestedDocs(((ApproximateScoreQuery) query).getOriginalQuery(), nestedPath); } else if (query instanceof BooleanQuery) { final BooleanQuery bq = (BooleanQuery) query; final boolean hasRequiredClauses = bq.clauses().stream().anyMatch(BooleanClause::isRequired); diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java index fe3d66c674a82..48f7671b21971 100644 --- a/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java +++ b/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java @@ -8,6 +8,7 @@ package org.opensearch.search.approximate; +import org.apache.lucene.document.LongPoint; import 
org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PointValues; @@ -16,6 +17,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -24,41 +26,51 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.DocIdSetBuilder; import org.apache.lucene.util.IntsRef; -import org.opensearch.index.query.RangeQueryBuilder; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; import java.io.IOException; -import java.util.Arrays; import java.util.Objects; +import java.util.function.Function; /** * An approximate-able version of {@link PointRangeQuery}. It creates an instance of {@link PointRangeQuery} but short-circuits the intersect logic * after {@code size} is hit */ -public abstract class ApproximatePointRangeQuery extends ApproximateQuery { +public class ApproximatePointRangeQuery extends ApproximateQuery { + public static final Function LONG_FORMAT = bytes -> Long.toString(LongPoint.decodeDimension(bytes, 0)); private int size; private SortOrder sortOrder; public final PointRangeQuery pointRangeQuery; - protected ApproximatePointRangeQuery(String field, byte[] lowerPoint, byte[] upperPoint, int numDims) { - this(field, lowerPoint, upperPoint, numDims, 10_000, null); + public ApproximatePointRangeQuery( + String field, + byte[] lowerPoint, + byte[] upperPoint, + int numDims, + Function valueToString + ) { + this(field, lowerPoint, upperPoint, numDims, SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO, null, valueToString); } - protected ApproximatePointRangeQuery(String field, byte[] lowerPoint, byte[] upperPoint, int numDims, int size) { - this(field, lowerPoint, upperPoint, numDims, size, null); - } - - protected ApproximatePointRangeQuery(String field, byte[] lowerPoint, byte[] upperPoint, int numDims, int size, SortOrder sortOrder) { + protected ApproximatePointRangeQuery( + String field, + byte[] lowerPoint, + byte[] upperPoint, + int numDims, + int size, + SortOrder sortOrder, + Function valueToString + ) { this.size = size; this.sortOrder = sortOrder; this.pointRangeQuery = new PointRangeQuery(field, lowerPoint, upperPoint, numDims) { @Override protected String toString(int dimension, byte[] value) { - return super.toString(field); + return valueToString.apply(value); } }; } @@ -79,6 +91,11 @@ public void setSortOrder(SortOrder sortOrder) { this.sortOrder = sortOrder; } + @Override + public Query rewrite(IndexSearcher indexSearcher) throws IOException { + return super.rewrite(indexSearcher); + } + @Override public void visit(QueryVisitor visitor) { pointRangeQuery.visit(visitor); @@ -344,7 +361,6 @@ public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOExcepti if (checkValidPointValues(values) == false) { return null; } - final Weight weight = this; if (size > values.size()) { return pointRangeQueryWeight.scorerSupplier(context); } else { @@ -426,17 +442,26 @@ public boolean canApproximate(SearchContext context) { } // size 0 could be set for caching if (context.from() + context.size() == 0) { - this.setSize(10_000); + this.setSize(SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO); + } else { + this.setSize(Math.max(context.from() + 
context.size(), context.trackTotalHitsUpTo())); } - this.setSize(Math.max(context.from() + context.size(), context.trackTotalHitsUpTo())); if (context.request() != null && context.request().source() != null) { FieldSortBuilder primarySortField = FieldSortBuilder.getPrimaryFieldSortOrNull(context.request().source()); - if (primarySortField != null - && primarySortField.missing() == null - && primarySortField.getFieldName().equals(((RangeQueryBuilder) context.request().source().query()).fieldName())) { - if (primarySortField.order() == SortOrder.DESC) { - this.setSortOrder(SortOrder.DESC); + if (primarySortField != null) { + if (!primarySortField.fieldName().equals(pointRangeQuery.getField())) { + return false; + } + if (primarySortField.missing() != null) { + // Cannot sort documents missing this field. + return false; } + if (context.request().source().searchAfter() != null) { + // TODO: We *could* optimize searchAfter, especially when this is the only sort field, but existing pruning is pretty + // good. + return false; + } + this.setSortOrder(primarySortField.order()); } } return true; @@ -453,56 +478,16 @@ public final boolean equals(Object o) { } private boolean equalsTo(ApproximatePointRangeQuery other) { - return Objects.equals(pointRangeQuery.getField(), other.pointRangeQuery.getField()) - && pointRangeQuery.getNumDims() == other.pointRangeQuery.getNumDims() - && pointRangeQuery.getBytesPerDim() == other.pointRangeQuery.getBytesPerDim() - && Arrays.equals(pointRangeQuery.getLowerPoint(), other.pointRangeQuery.getLowerPoint()) - && Arrays.equals(pointRangeQuery.getUpperPoint(), other.pointRangeQuery.getUpperPoint()); + return Objects.equals(pointRangeQuery, other.pointRangeQuery); } @Override public final String toString(String field) { final StringBuilder sb = new StringBuilder(); - if (pointRangeQuery.getField().equals(field) == false) { - sb.append(pointRangeQuery.getField()); - sb.append(':'); - } - - // print ourselves as "range per dimension" - for (int i = 0; i < pointRangeQuery.getNumDims(); i++) { - if (i > 0) { - sb.append(','); - } - - int startOffset = pointRangeQuery.getBytesPerDim() * i; - - sb.append('['); - sb.append( - toString( - i, - ArrayUtil.copyOfSubArray(pointRangeQuery.getLowerPoint(), startOffset, startOffset + pointRangeQuery.getBytesPerDim()) - ) - ); - sb.append(" TO "); - sb.append( - toString( - i, - ArrayUtil.copyOfSubArray(pointRangeQuery.getUpperPoint(), startOffset, startOffset + pointRangeQuery.getBytesPerDim()) - ) - ); - sb.append(']'); - } + sb.append("Approximate("); + sb.append(pointRangeQuery.toString()); + sb.append(")"); return sb.toString(); } - - /** - * Returns a string of a single value in a human-readable format for debugging. This is used by - * {@link #toString()}. 
- * - * @param dimension dimension of the particular value - * @param value single value, never null - * @return human readable value for debugging - */ - protected abstract String toString(int dimension, byte[] value); } diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java index 2395142c606ae..6b39606620716 100644 --- a/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java +++ b/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java @@ -42,9 +42,10 @@ public ApproximateQuery getApproximationQuery() { } @Override - public final Query rewrite(IndexSearcher indexSearcher) throws IOException { + public Query rewrite(IndexSearcher indexSearcher) throws IOException { if (resolvedQuery == null) { - throw new IllegalStateException("Cannot rewrite resolved query without setContext being called"); + // Default to the original query. This suggests that we were not called from ContextIndexSearcher. + return originalQuery.rewrite(indexSearcher); } return resolvedQuery.rewrite(indexSearcher); } diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index 3215759b59259..bf570a0dd0d60 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -191,6 +191,9 @@ public void setAggregatedDfs(AggregatedDfs aggregatedDfs) { @Override public Query rewrite(Query original) throws IOException { + if (original instanceof ApproximateScoreQuery) { + ((ApproximateScoreQuery) original).setContext(searchContext); + } if (profiler != null) { profiler.startRewriteTime(); } @@ -221,9 +224,6 @@ public Weight createWeight(Query query, ScoreMode scoreMode, float boost) throws profiler.pollLastElement(); } return new ProfileWeight(query, weight, profile); - } else if (query instanceof ApproximateScoreQuery) { - ((ApproximateScoreQuery) query).setContext(searchContext); - return super.createWeight(query, scoreMode, boost); } else { return super.createWeight(query, scoreMode, boost); } diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java index e3f2f6e5ea27c..3fda7bcb1616a 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java @@ -65,7 +65,6 @@ import org.opensearch.common.time.DateFormatters; import org.opensearch.common.time.DateMathParser; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.IndexSettings; import org.opensearch.index.fielddata.IndexNumericFieldData; @@ -81,6 +80,7 @@ import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.approximate.ApproximatePointRangeQuery; import org.opensearch.search.approximate.ApproximateScoreQuery; +import org.opensearch.test.TestSearchContext; import org.joda.time.DateTimeZone; import java.io.IOException; @@ -91,9 +91,7 @@ import java.util.List; import java.util.Locale; -import static org.hamcrest.CoreMatchers.is; import static org.apache.lucene.document.LongPoint.pack; -import static org.junit.Assume.assumeThat; public 
class DateFieldTypeTests extends FieldTypeTestCase { @@ -239,18 +237,9 @@ public void testTermQuery() { "field", pack(new long[] { instant }).bytes, pack(new long[] { instant + 999 }).bytes, - new long[] { instant }.length - ) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } - ); - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) + new long[] { instant }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ) ); assertEquals(expected, ft.termQuery(date, context)); @@ -298,32 +287,17 @@ public void testRangeQuery() throws IOException { String date2 = "2016-04-28T11:33:52"; long instant1 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date1)).toInstant().toEpochMilli(); long instant2 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date2)).toInstant().toEpochMilli() + 999; - Query expected = new ApproximateScoreQuery( - new IndexOrDocValuesQuery( - LongPoint.newRangeQuery("field", instant1, instant2), - SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) - ), - new ApproximatePointRangeQuery( - "field", - pack(new long[] { instant1 }).bytes, - pack(new long[] { instant2 }).bytes, - new long[] { instant1 }.length - ) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } - ); - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) - ); - assertEquals( - expected, - ft.rangeQuery(date1, date2, true, true, null, null, null, context).rewrite(new IndexSearcher(new MultiReader())) + Query expected = new ApproximatePointRangeQuery( + "field", + pack(new long[] { instant1 }).bytes, + pack(new long[] { instant2 }).bytes, + new long[] { instant1 }.length, + ApproximatePointRangeQuery.LONG_FORMAT ); + Query rangeQuery = ft.rangeQuery(date1, date2, true, true, null, null, null, context); + assertTrue(rangeQuery instanceof ApproximateScoreQuery); + ((ApproximateScoreQuery) rangeQuery).setContext(new TestSearchContext(context)); + assertEquals(expected, rangeQuery.rewrite(new IndexSearcher(new MultiReader()))); instant1 = nowInMillis; instant2 = instant1 + 100; @@ -337,20 +311,11 @@ protected String toString(int dimension, byte[] value) { "field", pack(new long[] { instant1 }).bytes, pack(new long[] { instant2 }).bytes, - new long[] { instant1 }.length - ) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } + new long[] { instant1 }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ) ) ); - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) - ); assertEquals(expected, ft.rangeQuery("now", instant2, true, true, null, null, null, context)); MappedFieldType unsearchable = new DateFieldType( @@ -408,30 +373,21 @@ public void testRangeQueryWithIndexSort() { long instant2 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date2)).toInstant().toEpochMilli() + 999; Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2); - Query expected = new IndexSortSortedNumericDocValuesRangeQuery( - "field", - instant1, - instant2, - new ApproximateScoreQuery( - new 
IndexOrDocValuesQuery(LongPoint.newRangeQuery("field", instant1, instant2), dvQuery), - new ApproximatePointRangeQuery( - "field", - pack(new long[] { instant1 }).bytes, - pack(new long[] { instant2 }).bytes, - new long[] { instant1 }.length - ) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } + Query expected = new ApproximateScoreQuery( + new IndexSortSortedNumericDocValuesRangeQuery( + "field", + instant1, + instant2, + new IndexOrDocValuesQuery(LongPoint.newRangeQuery("field", instant1, instant2), dvQuery) + ), + new ApproximatePointRangeQuery( + "field", + pack(new long[] { instant1 }).bytes, + pack(new long[] { instant2 }).bytes, + new long[] { instant1 }.length, + ApproximatePointRangeQuery.LONG_FORMAT ) ); - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) - ); assertEquals(expected, ft.rangeQuery(date1, date2, true, true, null, null, null, context)); } diff --git a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java index e1551e225b307..6382f033d74d5 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java @@ -75,6 +75,8 @@ import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; +import org.opensearch.search.approximate.ApproximatePointRangeQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.search.query.BitmapDocValuesQuery; import org.opensearch.search.query.BitmapIndexQuery; import org.junit.Before; @@ -96,6 +98,7 @@ import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.apache.lucene.document.LongPoint.pack; public class NumberFieldTypeTests extends FieldTypeTestCase { @@ -392,9 +395,9 @@ public void testUnsignedLongRangeQueryWithDecimalParts() { public void testLongRangeQuery() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.LONG); - Query expected = new IndexOrDocValuesQuery( - LongPoint.newRangeQuery("field", 1, 3), - SortedNumericDocValuesField.newSlowRangeQuery("field", 1, 3) + Query expected = new ApproximateScoreQuery( + new IndexOrDocValuesQuery(LongPoint.newRangeQuery("field", 1, 3), SortedNumericDocValuesField.newSlowRangeQuery("field", 1, 3)), + new ApproximatePointRangeQuery("field", pack(1).bytes, pack(3).bytes, 1, ApproximatePointRangeQuery.LONG_FORMAT) ); assertEquals(expected, ft.rangeQuery("1", "3", true, true, null, null, null, MOCK_QSC)); @@ -681,7 +684,11 @@ public void doTestDocValueRangeQueries(NumberType type, Supplier valueSu true, MOCK_QSC ); - assertThat(query, either(instanceOf(IndexOrDocValuesQuery.class)).or(instanceOf(MatchNoDocsQuery.class))); + assertThat( + query, + either(instanceOf(IndexOrDocValuesQuery.class)).or(instanceOf(MatchNoDocsQuery.class)) + .or(instanceOf(ApproximateScoreQuery.class)) + ); if (query instanceof IndexOrDocValuesQuery) { IndexOrDocValuesQuery indexOrDvQuery = (IndexOrDocValuesQuery) query; assertEquals(searcher.count(indexOrDvQuery.getIndexQuery()), searcher.count(indexOrDvQuery.getRandomAccessQuery())); @@ -764,10 +771,20 @@ public void 
doTestIndexSortRangeQueries(NumberType type, Supplier valueS true, context ); - assertThat(query, instanceOf(IndexSortSortedNumericDocValuesRangeQuery.class)); + assertThat( + query, + either(instanceOf(IndexSortSortedNumericDocValuesRangeQuery.class)).or(instanceOf(ApproximateScoreQuery.class)) + ); - Query fallbackQuery = ((IndexSortSortedNumericDocValuesRangeQuery) query).getFallbackQuery(); - assertThat(fallbackQuery, instanceOf(IndexOrDocValuesQuery.class)); + Query fallbackQuery; + if (query instanceof IndexSortSortedNumericDocValuesRangeQuery) { + fallbackQuery = ((IndexSortSortedNumericDocValuesRangeQuery) query).getFallbackQuery(); + assertThat(fallbackQuery, instanceOf(IndexOrDocValuesQuery.class)); + } else { + fallbackQuery = ((IndexSortSortedNumericDocValuesRangeQuery) ((ApproximateScoreQuery) query).getOriginalQuery()) + .getFallbackQuery(); + assertThat(fallbackQuery, instanceOf(IndexOrDocValuesQuery.class)); + } IndexOrDocValuesQuery indexOrDvQuery = (IndexOrDocValuesQuery) fallbackQuery; assertEquals(searcher.count(query), searcher.count(indexOrDvQuery.getIndexQuery())); diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java index 8753f58628e82..8591a1e2bb8df 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java @@ -46,7 +46,6 @@ import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.time.DateMathParser; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryStringQueryBuilder; import org.opensearch.lucene.queries.BinaryDocValuesRangeQuery; @@ -57,11 +56,9 @@ import java.io.IOException; import java.net.InetAddress; -import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.either; import static org.hamcrest.core.IsInstanceOf.instanceOf; import static org.apache.lucene.document.LongPoint.pack; -import static org.junit.Assume.assumeThat; public class RangeFieldQueryStringQueryBuilderTests extends AbstractQueryTestCase { @@ -185,11 +182,6 @@ public void testDateRangeQuery() throws Exception { parser.parse(lowerBoundExact, () -> 0).toEpochMilli(), parser.parse(upperBoundExact, () -> 0).toEpochMilli() ); - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) - ); assertEquals( new ApproximateScoreQuery( new IndexOrDocValuesQuery( @@ -204,13 +196,9 @@ public void testDateRangeQuery() throws Exception { DATE_FIELD_NAME, pack(new long[] { parser.parse(lowerBoundExact, () -> 0).toEpochMilli() }).bytes, pack(new long[] { parser.parse(upperBoundExact, () -> 0).toEpochMilli() }).bytes, - new long[] { parser.parse(lowerBoundExact, () -> 0).toEpochMilli() }.length - ) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } + new long[] { parser.parse(lowerBoundExact, () -> 0).toEpochMilli() }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ) ), queryOnDateField ); diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java index 
3fadfb9b2f976..5065c64d8f8fd 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java @@ -286,11 +286,6 @@ public void testDateRangeQueryUsingMappingFormatLegacy() { // compare lower and upper bounds with what we would get on a `date` field DateFieldType dateFieldType = new DateFieldType("field", DateFieldMapper.Resolution.MILLISECONDS, formatter); final Query queryOnDateField = dateFieldType.rangeQuery(from, to, true, true, relation, null, fieldType.dateMathParser(), context); - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) - ); assertEquals( "field:[1465975790000 TO 1466062190999]", ((IndexOrDocValuesQuery) ((ApproximateScoreQuery) queryOnDateField).getOriginalQuery()).getIndexQuery().toString() diff --git a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java index 9e2dae881d779..4458271124693 100644 --- a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java @@ -70,7 +70,6 @@ import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.FieldNamesFieldMapper; @@ -101,9 +100,7 @@ import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; import static org.apache.lucene.document.LongPoint.pack; -import static org.junit.Assume.assumeThat; public class QueryStringQueryBuilderTests extends AbstractQueryTestCase { @@ -853,11 +850,6 @@ public void testToQueryDateWithTimeZone() throws Exception { QueryStringQueryBuilder qsq = queryStringQuery(DATE_FIELD_NAME + ":1970-01-01"); QueryShardContext context = createShardContext(); Query query = qsq.toQuery(context); - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) - ); assertThat(query, instanceOf(ApproximateScoreQuery.class)); long lower = 0; // 1970-01-01T00:00:00.999 UTC long upper = 86399999; // 1970-01-01T23:59:59.999 UTC @@ -877,13 +869,9 @@ private ApproximateScoreQuery calculateExpectedDateQuery(long lower, long upper) DATE_FIELD_NAME, pack(new long[] { lower }).bytes, pack(new long[] { upper }).bytes, - new long[] { lower }.length - ) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } + new long[] { lower }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ) ); } diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java index 34f4657bad100..71d5d69a1c0e7 100644 --- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java @@ -47,7 +47,6 @@ import org.opensearch.OpenSearchParseException; import 
org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.lucene.BytesRefs; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.ParsingException; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.FieldNamesFieldMapper; @@ -69,12 +68,10 @@ import java.util.Map; import static org.opensearch.index.query.QueryBuilders.rangeQuery; -import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.sameInstance; import static org.apache.lucene.document.LongPoint.pack; -import static org.junit.Assume.assumeThat; public class RangeQueryBuilderTests extends AbstractQueryTestCase { @Override @@ -190,11 +187,6 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, assertThat(termRangeQuery.includesLower(), equalTo(queryBuilder.includeLower())); assertThat(termRangeQuery.includesUpper(), equalTo(queryBuilder.includeUpper())); } else if (expectedFieldName.equals(DATE_FIELD_NAME)) { - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) - ); assertThat(query, instanceOf(ApproximateScoreQuery.class)); Query approximationQuery = ((ApproximateScoreQuery) query).getApproximationQuery(); assertThat(approximationQuery, instanceOf(ApproximateQuery.class)); @@ -258,13 +250,9 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, DATE_FIELD_NAME, pack(new long[] { minLong }).bytes, pack(new long[] { maxLong }).bytes, - new long[] { minLong }.length - ) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } + new long[] { minLong }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ) ), query ); @@ -332,11 +320,6 @@ public void testDateRangeQueryFormat() throws IOException { + " }\n" + "}"; Query parsedQuery = parseQuery(query).toQuery(createShardContext()); - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) - ); assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); Query approximationQuery = ((ApproximateScoreQuery) parsedQuery).getApproximationQuery(); assertThat(approximationQuery, instanceOf(ApproximateQuery.class)); @@ -354,13 +337,9 @@ public void testDateRangeQueryFormat() throws IOException { DATE_FIELD_NAME, pack(new long[] { lower }).bytes, pack(new long[] { upper }).bytes, - new long[] { lower }.length - ) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } + new long[] { lower }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ) ), parsedQuery ); @@ -392,11 +371,6 @@ public void testDateRangeBoundaries() throws IOException { + " }\n" + "}\n"; Query parsedQuery = parseQuery(query).toQuery(createShardContext()); - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) - ); assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); long lower = DateTime.parse("2014-11-01T00:00:00.000+00").getMillis(); @@ -411,13 +385,9 @@ public void testDateRangeBoundaries() throws IOException { DATE_FIELD_NAME, pack(new long[] { lower }).bytes, pack(new long[] { upper }).bytes, - new long[] { lower }.length - ) { - @Override - 
protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } + new long[] { lower }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ) ) , @@ -448,13 +418,9 @@ protected String toString(int dimension, byte[] value) { DATE_FIELD_NAME, pack(new long[] { lower }).bytes, pack(new long[] { upper }).bytes, - new long[] { lower }.length - ) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - } + new long[] { lower }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ) ) , @@ -478,11 +444,6 @@ public void testDateRangeQueryTimezone() throws IOException { Query parsedQuery = parseQuery(query).toQuery(context); assertThat(parsedQuery, instanceOf(DateRangeIncludingNowQuery.class)); parsedQuery = ((DateRangeIncludingNowQuery) parsedQuery).getQuery(); - assumeThat( - "Using Approximate Range Query as default", - FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), - is(true) - ); assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); parsedQuery = ((ApproximateScoreQuery) parsedQuery).getApproximationQuery(); assertThat(parsedQuery, instanceOf(ApproximateQuery.class)); diff --git a/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java b/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java index 5d9b523c35f82..4f8bab569db5c 100644 --- a/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java @@ -60,11 +60,13 @@ public void testApproximateRangeEqualsActualRange() throws IOException { try { long lower = RandomNumbers.randomLongBetween(random(), -100, 200); long upper = lower + RandomNumbers.randomLongBetween(random(), 0, 100); - Query approximateQuery = new ApproximatePointRangeQuery("point", pack(lower).bytes, pack(upper).bytes, dims) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; + Query approximateQuery = new ApproximatePointRangeQuery( + "point", + pack(lower).bytes, + pack(upper).bytes, + dims, + ApproximatePointRangeQuery.LONG_FORMAT + ); Query query = LongPoint.newRangeQuery("point", lower, upper); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(approximateQuery, 10); @@ -100,11 +102,13 @@ public void testApproximateRangeWithDefaultSize() throws IOException { try { long lower = 0; long upper = 1000; - Query approximateQuery = new ApproximatePointRangeQuery("point", pack(lower).bytes, pack(upper).bytes, dims) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; + Query approximateQuery = new ApproximatePointRangeQuery( + "point", + pack(lower).bytes, + pack(upper).bytes, + dims, + ApproximatePointRangeQuery.LONG_FORMAT + ); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(approximateQuery, 10); assertEquals(topDocs.totalHits, new TotalHits(1000, TotalHits.Relation.EQUAL_TO)); @@ -138,11 +142,15 @@ public void testApproximateRangeWithSizeUnderDefault() throws IOException { try { long lower = 0; long upper = 45; - Query approximateQuery = new ApproximatePointRangeQuery("point", pack(lower).bytes, pack(upper).bytes, dims, 10) { - protected String toString(int dimension, byte[] value) { 
- return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; + Query approximateQuery = new ApproximatePointRangeQuery( + "point", + pack(lower).bytes, + pack(upper).bytes, + dims, + 10, + null, + ApproximatePointRangeQuery.LONG_FORMAT + ); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(approximateQuery, 10); assertEquals(topDocs.totalHits, new TotalHits(10, TotalHits.Relation.EQUAL_TO)); @@ -182,12 +190,10 @@ public void testApproximateRangeWithSizeOverDefault() throws IOException { pack(lower).bytes, pack(upper).bytes, dims, - 11_000 - ) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; + 11_000, + null, + ApproximatePointRangeQuery.LONG_FORMAT + ); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(approximateQuery, 11000); @@ -228,11 +234,15 @@ public void testApproximateRangeShortCircuit() throws IOException { try { long lower = 0; long upper = 100; - Query approximateQuery = new ApproximatePointRangeQuery("point", pack(lower).bytes, pack(upper).bytes, dims, 10) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; + Query approximateQuery = new ApproximatePointRangeQuery( + "point", + pack(lower).bytes, + pack(upper).bytes, + dims, + 10, + null, + ApproximatePointRangeQuery.LONG_FORMAT + ); Query query = LongPoint.newRangeQuery("point", lower, upper); IndexSearcher searcher = new IndexSearcher(reader); @@ -278,12 +288,9 @@ public void testApproximateRangeShortCircuitAscSort() throws IOException { pack(upper).bytes, dims, 10, - SortOrder.ASC - ) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; + SortOrder.ASC, + ApproximatePointRangeQuery.LONG_FORMAT + ); Query query = LongPoint.newRangeQuery("point", lower, upper); IndexSearcher searcher = new IndexSearcher(reader); @@ -312,11 +319,13 @@ protected String toString(int dimension, byte[] value) { } public void testSize() { - ApproximatePointRangeQuery query = new ApproximatePointRangeQuery("point", pack(0).bytes, pack(20).bytes, 1) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; + ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( + "point", + pack(0).bytes, + pack(20).bytes, + 1, + ApproximatePointRangeQuery.LONG_FORMAT + ); assertEquals(query.getSize(), 10_000); query.setSize(100); @@ -325,11 +334,13 @@ protected String toString(int dimension, byte[] value) { } public void testSortOrder() { - ApproximatePointRangeQuery query = new ApproximatePointRangeQuery("point", pack(0).bytes, pack(20).bytes, 1) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; + ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( + "point", + pack(0).bytes, + pack(20).bytes, + 1, + ApproximatePointRangeQuery.LONG_FORMAT + ); assertNull(query.getSortOrder()); query.setSortOrder(SortOrder.ASC); @@ -337,19 +348,23 @@ protected String toString(int dimension, byte[] value) { } public void testCanApproximate() { - ApproximatePointRangeQuery query = new ApproximatePointRangeQuery("point", pack(0).bytes, pack(20).bytes, 1) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; + 
ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( + "point", + pack(0).bytes, + pack(20).bytes, + 1, + ApproximatePointRangeQuery.LONG_FORMAT + ); assertFalse(query.canApproximate(null)); - ApproximatePointRangeQuery queryCanApproximate = new ApproximatePointRangeQuery("point", pack(0).bytes, pack(20).bytes, 1) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - + ApproximatePointRangeQuery queryCanApproximate = new ApproximatePointRangeQuery( + "point", + pack(0).bytes, + pack(20).bytes, + 1, + ApproximatePointRangeQuery.LONG_FORMAT + ) { public boolean canApproximate(SearchContext context) { return true; } diff --git a/server/src/test/java/org/opensearch/search/approximate/ApproximateScoreQueryTests.java b/server/src/test/java/org/opensearch/search/approximate/ApproximateScoreQueryTests.java index aa45ea6744227..5af49efb30f68 100644 --- a/server/src/test/java/org/opensearch/search/approximate/ApproximateScoreQueryTests.java +++ b/server/src/test/java/org/opensearch/search/approximate/ApproximateScoreQueryTests.java @@ -46,12 +46,9 @@ protected String toString(int dimension, byte[] value) { "test-index", pack(new long[] { l }).bytes, pack(new long[] { u }).bytes, - new long[] { l }.length - ) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; + new long[] { l }.length, + ApproximatePointRangeQuery.LONG_FORMAT + ); ApproximateScoreQuery query = new ApproximateScoreQuery(originalQuery, approximateQuery); query.resolvedQuery = approximateQuery; From 967eee15eeb4219f2e7e5275c132cf86deff226a Mon Sep 17 00:00:00 2001 From: Sandeep Kumawat <2025sandeepkumawat@gmail.com> Date: Thu, 10 Apr 2025 13:41:33 +0530 Subject: [PATCH 187/550] Fix flaky test SegmentReplicationIT.testReplicaAlreadyAtCheckpoint (#17216) * Fix flaky test SegmentReplicationIT.testReplicaAlreadyAtCheckpoint Signed-off-by: skumwt * Fix flaky test SegmentReplicationIT.testReplicaAlreadyAtCheckpoint Signed-off-by: Sandeep Kumawat Signed-off-by: skumwt --------- Signed-off-by: skumwt Signed-off-by: Sandeep Kumawat Co-authored-by: skumwt --- .../replication/SegmentReplicationIT.java | 26 ++++++++++++++----- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 02637a1758a8d..608ae2e215b31 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -1892,17 +1892,29 @@ public void testReplicaAlreadyAtCheckpoint() throws Exception { // index a doc. 
client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", randomInt()).get(); refresh(INDEX_NAME); + waitForSearchableDocs(1, primaryNode, replicaNode, replicaNode2); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); ensureYellowAndNoInitializingShards(INDEX_NAME); - IndexShard replica_1 = getIndexShard(replicaNode, INDEX_NAME); - IndexShard replica_2 = getIndexShard(replicaNode2, INDEX_NAME); + AtomicReference replica_1 = new AtomicReference<>(); + AtomicReference replica_2 = new AtomicReference<>(); // wait until a replica is promoted & finishes engine flip, we don't care which one AtomicReference primary = new AtomicReference<>(); assertBusy(() -> { - assertTrue("replica should be promoted as a primary", replica_1.routingEntry().primary() || replica_2.routingEntry().primary()); - primary.set(replica_1.routingEntry().primary() ? replica_1 : replica_2); - }); + IndexShard replicaShard1 = getIndexShard(replicaNode, INDEX_NAME); + IndexShard replicaShard2 = getIndexShard(replicaNode2, INDEX_NAME); + + assertNotNull("Replica shard 1 should not be null", replicaShard1); + assertNotNull("Replica shard 2 should not be null", replicaShard2); + + replica_1.set(replicaShard1); + replica_2.set(replicaShard2); + assertTrue( + "replica should be promoted as a primary", + replica_1.get().routingEntry().primary() || replica_2.get().routingEntry().primary() + ); + primary.set(replica_1.get().routingEntry().primary() ? replica_1.get() : replica_2.get()); + }, 60, TimeUnit.SECONDS); FlushRequest request = new FlushRequest(INDEX_NAME); request.force(true); @@ -1910,8 +1922,8 @@ public void testReplicaAlreadyAtCheckpoint() throws Exception { assertBusy(() -> { assertEquals( - replica_1.getLatestReplicationCheckpoint().getSegmentInfosVersion(), - replica_2.getLatestReplicationCheckpoint().getSegmentInfosVersion() + replica_1.get().getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica_2.get().getLatestReplicationCheckpoint().getSegmentInfosVersion() ); });

From 18a3b75fb14217d39700fba367617d37f723293d Mon Sep 17 00:00:00 2001
From: Finn
Date: Thu, 10 Apr 2025 04:43:22 -0700
Subject: [PATCH 188/550] Enable TLS for SecureNetty4GrpcServerTransport (#17796)

- Adds SecureAuxTransportSettingsProvider to provide aux transports access to a javax SSLContext and cipher/client auth params for configuring TLS.
- Implements SecureNetty4GrpcServerTransport to consume a SecureAuxTransportSettingsProvider for a TLS enabled gRPC transport.
- Add aux transport type settings and port settings for the new secure transport (see the configuration sketch below).
- Add logic to detect and register secure aux transports provided by plugins.
- Integration tests for SecureNetty4GrpcServerTransport basic client cert authentication.
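Taken together, these changes let a node opt in to the secure gRPC transport purely through settings. A minimal opensearch.yml sketch, using only the setting keys and example values from the plugin README added in this patch (the port range and hosts are placeholder values, not requirements):

```
# Run the TLS-enabled gRPC auxiliary transport alongside the REST API.
aux.transport.types: [experimental-secure-transport-grpc]
# Optional: pin the gRPC port range and bind/publish hosts.
aux.transport.experimental-secure-transport-grpc.port: 9400-9500
grpc.host: ["0.0.0.0"]
grpc.publish_port: 9400
```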
Signed-off-by: Finn Carroll --- CHANGELOG.md | 1 + .../arrow/flight/bootstrap/FlightService.java | 1 + plugins/transport-grpc/README.md | 42 ++++ plugins/transport-grpc/build.gradle | 11 +- ....java => Netty4GrpcServerTransportIT.java} | 45 +++- .../SecureNetty4GrpcServerTransportIT.java | 223 ++++++++++++++++++ .../plugin/transport/grpc/GrpcPlugin.java | 41 +++- .../grpc/Netty4GrpcServerTransport.java | 47 +++- .../ssl/SecureNetty4GrpcServerTransport.java | 130 ++++++++++ .../transport/grpc/ssl/package-info.java | 12 + .../transport/grpc/GrpcPluginTests.java | 28 ++- .../grpc/Netty4GrpcServerTransportTests.java | 56 +++-- .../transport/grpc/ssl/NettyGrpcClient.java | 168 +++++++++++++ .../SecureNetty4GrpcServerTransportTests.java | 157 ++++++++++++ .../grpc/ssl/SecureSettingsHelpers.java | 169 +++++++++++++ .../src/test/resources/README.txt | 26 ++ .../test/resources/netty4-client-secure.jks | Bin 0 -> 2772 bytes .../test/resources/netty4-server-secure.jks | Bin 0 -> 2772 bytes .../common/network/NetworkModule.java | 31 +++ .../org/opensearch/plugins/NetworkPlugin.java | 22 ++ .../SecureAuxTransportSettingsProvider.java | 52 ++++ .../plugins/SecureSettingsFactory.java | 7 + .../transport/TransportAdapterProvider.java | 2 +- .../common/network/NetworkModuleTests.java | 7 + 24 files changed, 1239 insertions(+), 39 deletions(-) create mode 100644 plugins/transport-grpc/README.md rename plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/{GrpcTransportIT.java => Netty4GrpcServerTransportIT.java} (52%) create mode 100644 plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransportIT.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/ssl/SecureNetty4GrpcServerTransport.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/ssl/package-info.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/NettyGrpcClient.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransportTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureSettingsHelpers.java create mode 100644 plugins/transport-grpc/src/test/resources/README.txt create mode 100644 plugins/transport-grpc/src/test/resources/netty4-client-secure.jks create mode 100644 plugins/transport-grpc/src/test/resources/netty4-server-secure.jks create mode 100644 server/src/main/java/org/opensearch/plugins/SecureAuxTransportSettingsProvider.java diff --git a/CHANGELOG.md b/CHANGELOG.md index eb19d6d86f7f4..aa4591734896f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Security Manager Replacement] Enhance Java Agent to intercept Runtime::halt ([#17757](https://github.com/opensearch-project/OpenSearch/pull/17757)) - [Security Manager Replacement] Phase off SecurityManager usage in favor of Java Agent ([#17861](https://github.com/opensearch-project/OpenSearch/pull/17861)) - Support AutoExpand for SearchReplica ([#17741](https://github.com/opensearch-project/OpenSearch/pull/17741)) +- Add TLS enabled SecureNetty4GrpcServerTransport ([#17796](https://github.com/opensearch-project/OpenSearch/pull/17796)) - Implement fixed interval refresh task scheduling 
([#17777](https://github.com/opensearch-project/OpenSearch/pull/17777)) - [Tiered caching] Create a single cache manager for all the disk caches. ([#17513](https://github.com/opensearch-project/OpenSearch/pull/17513)) - Add GRPC DocumentService and Bulk endpoint ([#17727](https://github.com/opensearch-project/OpenSearch/pull/17727)) diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java index 7735fc3df73e0..fdcbbf43d75bf 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java @@ -134,6 +134,7 @@ public StreamManager getStreamManager() { * Retrieves the bound address of the FlightService. * @return The BoundTransportAddress instance. */ + @Override public BoundTransportAddress getBoundAddress() { return serverComponents.getBoundAddress(); } diff --git a/plugins/transport-grpc/README.md b/plugins/transport-grpc/README.md new file mode 100644 index 0000000000000..59c9bc94205b5 --- /dev/null +++ b/plugins/transport-grpc/README.md @@ -0,0 +1,42 @@ +# transport-grpc + +An auxiliary transport which runs in parallel to the REST API. +The `transport-grpc` plugin initializes a new client/server transport implementing a gRPC protocol on Netty4. + +Enable this transport with: + +``` +setting 'aux.transport.types', '[experimental-transport-grpc]' +setting 'aux.transport.experimental-transport-grpc.port', '9400-9500' //optional +``` + +For the secure transport: + +``` +setting 'aux.transport.types', '[experimental-secure-transport-grpc]' +setting 'aux.transport.experimental-secure-transport-grpc.port', '9400-9500' //optional +``` + +Other settings are agnostic as to the gRPC transport type: + +``` +setting 'grpc.publish_port', '9400' +setting 'grpc.host', '["0.0.0.0"]' +setting 'grpc.bind_host', '["0.0.0.0", "::", "10.0.0.1"]' +setting 'grpc.publish_host', '["thisnode.example.com"]' +setting 'grpc.netty.worker_count', '2' +``` + +## Testing + +### Unit Tests + +``` +./gradlew :plugins:transport-grpc:test +``` + +### Integration Tests + +``` +./gradlew :plugins:transport-grpc:internalClusterTest +``` diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle index 3beed0ddc1bb0..12cbf0ecf76cf 100644 --- a/plugins/transport-grpc/build.gradle +++ b/plugins/transport-grpc/build.gradle @@ -1,5 +1,3 @@ -import org.gradle.api.attributes.java.TargetJvmEnvironment - /* * SPDX-License-Identifier: Apache-2.0 * @@ -8,6 +6,7 @@ import org.gradle.api.attributes.java.TargetJvmEnvironment * compatible open source license. 
*/ +apply plugin: 'opensearch.testclusters' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { @@ -15,6 +14,13 @@ opensearchplugin { classname = 'org.opensearch.plugin.transport.grpc.GrpcPlugin' } +testClusters { + integTest { + plugin(project.path) + setting 'aux.transport.types', '[experimental-transport-grpc]' + } +} + dependencies { compileOnly "com.google.code.findbugs:jsr305:3.0.2" runtimeOnly "com.google.guava:guava:${versions.guava}" @@ -30,6 +36,7 @@ dependencies { implementation "io.grpc:grpc-util:${versions.grpc}" implementation "io.perfmark:perfmark-api:0.26.0" implementation "org.opensearch:protobufs:0.2.0" + testImplementation project(':test:framework') } tasks.named("dependencyLicenses").configure { diff --git a/plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/GrpcTransportIT.java b/plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportIT.java similarity index 52% rename from plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/GrpcTransportIT.java rename to plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportIT.java index a5e40c16b323e..48925feeb4464 100644 --- a/plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/GrpcTransportIT.java +++ b/plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportIT.java @@ -8,35 +8,45 @@ package org.opensearch.plugin.transport.grpc; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.plugin.transport.grpc.ssl.NettyGrpcClient; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.List; + +import io.grpc.health.v1.HealthCheckResponse; import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY; -import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORT; import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2) -public class GrpcTransportIT extends OpenSearchIntegTestCase { +public class Netty4GrpcServerTransportIT extends OpenSearchIntegTestCase { - @Override - protected Collection> nodePlugins() { - return Collections.singletonList(GrpcPlugin.class); + private TransportAddress randomNetty4GrpcServerTransportAddr() { + List addresses = new ArrayList<>(); + for (Netty4GrpcServerTransport transport : internalCluster().getInstances(Netty4GrpcServerTransport.class)) { + TransportAddress tAddr = new TransportAddress(transport.getBoundAddress().publishAddress().address()); + addresses.add(tAddr); + } + return randomFrom(addresses); } @Override protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(SETTING_GRPC_PORT.getKey(), "0") - .put(AUX_TRANSPORT_TYPES_KEY, GRPC_TRANSPORT_SETTING_KEY) - .build(); + return 
Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(AUX_TRANSPORT_TYPES_KEY, GRPC_TRANSPORT_SETTING_KEY).build(); + } + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(GrpcPlugin.class); } public void testGrpcTransportStarted() { @@ -46,7 +56,7 @@ public void testGrpcTransportStarted() { assertNotNull("gRPC transport should be started on node " + nodeName, transport); // Verify that the transport is bound to an address - TransportAddress[] boundAddresses = transport.boundAddress().boundAddresses(); + TransportAddress[] boundAddresses = transport.getBoundAddress().boundAddresses(); assertTrue("gRPC transport should be bound to at least one address", boundAddresses.length > 0); // Log the bound addresses for debugging @@ -56,4 +66,15 @@ public void testGrpcTransportStarted() { } } } + + public void testStartGrpcTransportClusterHealth() throws Exception { + // REST api cluster health + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().get(); + assertEquals(ClusterHealthStatus.GREEN, healthResponse.getStatus()); + + // gRPC transport service health + try (NettyGrpcClient client = new NettyGrpcClient.Builder().setAddress(randomNetty4GrpcServerTransportAddr()).build()) { + assertEquals(client.checkHealth(), HealthCheckResponse.ServingStatus.SERVING); + } + } } diff --git a/plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransportIT.java b/plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransportIT.java new file mode 100644 index 0000000000000..0027e29e8c239 --- /dev/null +++ b/plugins/transport-grpc/src/internalClusterTest/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransportIT.java @@ -0,0 +1,223 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.ssl; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.plugin.transport.grpc.GrpcPlugin; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SecureAuxTransportSettingsProvider; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.SecureSettingsFactory; +import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.transport.grpc.ssl.SecureNetty4GrpcServerTransport; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Optional; + +import io.grpc.health.v1.HealthCheckResponse; + +import static org.opensearch.plugin.transport.grpc.ssl.SecureSettingsHelpers.getServerClientAuthNone; +import static org.opensearch.plugin.transport.grpc.ssl.SecureSettingsHelpers.getServerClientAuthOptional; +import static org.opensearch.plugin.transport.grpc.ssl.SecureSettingsHelpers.getServerClientAuthRequired; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY; +import static org.opensearch.transport.grpc.ssl.SecureNetty4GrpcServerTransport.GRPC_SECURE_TRANSPORT_SETTING_KEY; + +public abstract class SecureNetty4GrpcServerTransportIT extends OpenSearchIntegTestCase { + + public static class MockSecurityPlugin extends Plugin implements NetworkPlugin { + public MockSecurityPlugin() {} + + static class MockSecureSettingsFactory implements SecureSettingsFactory { + @Override + public Optional getSecureTransportSettingsProvider(Settings settings) { + return Optional.empty(); + } + + @Override + public Optional getSecureHttpTransportSettingsProvider(Settings settings) { + return Optional.empty(); + } + + @Override + public Optional getSecureAuxTransportSettingsProvider(Settings settings) { + return Optional.empty(); + } + } + } + + protected TransportAddress randomNetty4GrpcServerTransportAddr() { + List addresses = new ArrayList<>(); + for (SecureNetty4GrpcServerTransport transport : internalCluster().getInstances(SecureNetty4GrpcServerTransport.class)) { + TransportAddress tAddr = new TransportAddress(transport.getBoundAddress().publishAddress().address()); + addresses.add(tAddr); + } + return randomFrom(addresses); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(AUX_TRANSPORT_TYPES_KEY, GRPC_SECURE_TRANSPORT_SETTING_KEY) + .build(); + } + + @Override + protected Collection> nodePlugins() { + return List.of(GrpcPlugin.class, MockSecurityPlugin.class); + } + + private SecureSettingsHelpers.ConnectExceptions tryConnectClient(NettyGrpcClient client) { + try { + HealthCheckResponse.ServingStatus status = client.checkHealth(); + if (status == HealthCheckResponse.ServingStatus.SERVING) { + return SecureSettingsHelpers.ConnectExceptions.NONE; + } else { + throw new RuntimeException("Illegal state - unexpected server status: " + status.toString()); + } + } catch (Exception e) { + return SecureSettingsHelpers.ConnectExceptions.get(e); + } + } + + protected SecureSettingsHelpers.ConnectExceptions plaintextClientConnect() throws Exception { + try (NettyGrpcClient client = new 
NettyGrpcClient.Builder().setAddress(randomNetty4GrpcServerTransportAddr()).build()) { + return tryConnectClient(client); + } + } + + protected SecureSettingsHelpers.ConnectExceptions insecureClientConnect() throws Exception { + try ( + NettyGrpcClient client = new NettyGrpcClient.Builder().setAddress(randomNetty4GrpcServerTransportAddr()).insecure(true).build() + ) { + return tryConnectClient(client); + } + } + + protected SecureSettingsHelpers.ConnectExceptions trustedCertClientConnect() throws Exception { + try ( + NettyGrpcClient client = new NettyGrpcClient.Builder().setAddress(randomNetty4GrpcServerTransportAddr()) + .clientAuth(true) + .build() + ) { + return tryConnectClient(client); + } + } + + public void testClusterHealth() { + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().get(); + assertEquals(ClusterHealthStatus.GREEN, healthResponse.getStatus()); + } + + public static class SecureNetty4GrpcServerTransportNoAuthIT extends SecureNetty4GrpcServerTransportIT { + public static class NoAuthMockSecurityPlugin extends MockSecurityPlugin { + public NoAuthMockSecurityPlugin() {} + + @Override + public Optional getSecureSettingFactory(Settings settings) { + return Optional.of(new MockSecureSettingsFactory() { + @Override + public Optional getSecureAuxTransportSettingsProvider(Settings settings) { + return Optional.of(getServerClientAuthNone()); + } + }); + } + } + + @Override + protected Collection> nodePlugins() { + return List.of(GrpcPlugin.class, NoAuthMockSecurityPlugin.class); + } + + public void testPlaintextClientConnect() throws Exception { + assertEquals(plaintextClientConnect(), SecureSettingsHelpers.ConnectExceptions.UNAVAILABLE); + } + + public void testInsecureClientConnect() throws Exception { + assertEquals(insecureClientConnect(), SecureSettingsHelpers.ConnectExceptions.NONE); + } + + public void testTrustedClientConnect() throws Exception { + assertEquals(trustedCertClientConnect(), SecureSettingsHelpers.ConnectExceptions.NONE); + } + } + + public static class SecureNetty4GrpcServerTransportOptionalAuthIT extends SecureNetty4GrpcServerTransportIT { + public static class OptAuthMockSecurityPlugin extends MockSecurityPlugin { + public OptAuthMockSecurityPlugin() {} + + @Override + public Optional getSecureSettingFactory(Settings settings) { + return Optional.of(new MockSecureSettingsFactory() { + @Override + public Optional getSecureAuxTransportSettingsProvider(Settings settings) { + return Optional.of(getServerClientAuthOptional()); + } + }); + } + } + + @Override + protected Collection> nodePlugins() { + return List.of(GrpcPlugin.class, OptAuthMockSecurityPlugin.class); + } + + public void testPlaintextClientConnect() throws Exception { + assertEquals(plaintextClientConnect(), SecureSettingsHelpers.ConnectExceptions.UNAVAILABLE); + } + + public void testInsecureClientConnect() throws Exception { + assertEquals(insecureClientConnect(), SecureSettingsHelpers.ConnectExceptions.NONE); + } + + public void testTrustedClientConnect() throws Exception { + assertEquals(trustedCertClientConnect(), SecureSettingsHelpers.ConnectExceptions.NONE); + } + } + + public static class SecureNetty4GrpcServerTransportRequiredAuthIT extends SecureNetty4GrpcServerTransportIT { + public static class RequireAuthMockSecurityPlugin extends MockSecurityPlugin { + public RequireAuthMockSecurityPlugin() {} + + @Override + public Optional getSecureSettingFactory(Settings settings) { + return Optional.of(new MockSecureSettingsFactory() { + @Override + public Optional 
getSecureAuxTransportSettingsProvider(Settings settings) { + return Optional.of(getServerClientAuthRequired()); + } + }); + } + } + + @Override + protected Collection> nodePlugins() { + return List.of(GrpcPlugin.class, RequireAuthMockSecurityPlugin.class); + } + + public void testPlaintextClientConnect() throws Exception { + assertEquals(plaintextClientConnect(), SecureSettingsHelpers.ConnectExceptions.UNAVAILABLE); + } + + public void testInsecureClientConnect() throws Exception { + assertEquals(insecureClientConnect(), SecureSettingsHelpers.ConnectExceptions.BAD_CERT); + } + + public void testTrustedClientConnect() throws Exception { + assertEquals(trustedCertClientConnect(), SecureSettingsHelpers.ConnectExceptions.NONE); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java index 26e9721da4f44..d552e56b0f71b 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/GrpcPlugin.java @@ -22,11 +22,13 @@ import org.opensearch.plugin.transport.grpc.services.SearchServiceImpl; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SecureAuxTransportSettingsProvider; import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.client.Client; +import org.opensearch.transport.grpc.ssl.SecureNetty4GrpcServerTransport; import org.opensearch.watcher.ResourceWatcherService; import java.util.Collection; @@ -44,6 +46,8 @@ import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST; import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT; import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT; +import static org.opensearch.transport.grpc.ssl.SecureNetty4GrpcServerTransport.GRPC_SECURE_TRANSPORT_SETTING_KEY; +import static org.opensearch.transport.grpc.ssl.SecureNetty4GrpcServerTransport.SETTING_GRPC_SECURE_PORT; /** * Main class for the gRPC plugin. @@ -88,13 +92,47 @@ public Map> getAuxTransports( ); } + /** + * Provides secure auxiliary transports for the plugin. + * Registered under a distinct key from gRPC transport. + * Consumes pluggable security settings as provided by a SecureAuxTransportSettingsProvider. 
+ * + * @param settings The node settings + * @param threadPool The thread pool + * @param circuitBreakerService The circuit breaker service + * @param networkService The network service + * @param clusterSettings The cluster settings + * @param tracer The tracer + * @param secureAuxTransportSettingsProvider provides ssl context params + * @return A map of transport names to transport suppliers + */ + @Override + public Map> getSecureAuxTransports( + Settings settings, + ThreadPool threadPool, + CircuitBreakerService circuitBreakerService, + NetworkService networkService, + ClusterSettings clusterSettings, + SecureAuxTransportSettingsProvider secureAuxTransportSettingsProvider, + Tracer tracer + ) { + if (client == null) { + throw new RuntimeException("client cannot be null"); + } + List grpcServices = registerGRPCServices(new DocumentServiceImpl(client), new SearchServiceImpl(client)); + return Collections.singletonMap( + GRPC_SECURE_TRANSPORT_SETTING_KEY, + () -> new SecureNetty4GrpcServerTransport(settings, grpcServices, networkService, secureAuxTransportSettingsProvider) + ); + } + /** * Registers gRPC services to be exposed by the transport. * * @param services The gRPC services to register * @return A list of registered bindable services */ - protected List registerGRPCServices(BindableService... services) { + private List registerGRPCServices(BindableService... services) { return List.of(services); } @@ -107,6 +145,7 @@ protected List registerGRPCServices(BindableService... services public List> getSettings() { return List.of( SETTING_GRPC_PORT, + SETTING_GRPC_SECURE_PORT, SETTING_GRPC_HOST, SETTING_GRPC_PUBLISH_HOST, SETTING_GRPC_BIND_HOST, diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransport.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransport.java index 622834401970e..3107f762603f5 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransport.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransport.java @@ -32,9 +32,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import java.util.function.UnaryOperator; import io.grpc.BindableService; -import io.grpc.InsecureServerCredentials; import io.grpc.Server; import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; import io.grpc.netty.shaded.io.netty.channel.EventLoopGroup; @@ -115,14 +115,29 @@ public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport { Setting.Property.NodeScope ); - private final Settings settings; + /** + * Port range on which servers bind. + */ + protected PortsRange port; + + /** + * Port settings are set using the transport type, in this case GRPC_TRANSPORT_SETTING_KEY. + * Child classes have distinct transport type keys and need to override these settings. + */ + protected String portSettingKey; + + /** + * Settings. 
+ */ + protected final Settings settings; + private final NetworkService networkService; private final List services; - private final CopyOnWriteArrayList servers = new CopyOnWriteArrayList<>(); private final String[] bindHosts; private final String[] publishHosts; - private final PortsRange port; private final int nettyEventLoopThreads; + private final CopyOnWriteArrayList servers = new CopyOnWriteArrayList<>(); + private final List> serverBuilderConfigs = new ArrayList<>(); private volatile BoundTransportAddress boundAddress; private volatile EventLoopGroup eventLoopGroup; @@ -150,12 +165,23 @@ public Netty4GrpcServerTransport(Settings settings, List servic this.port = SETTING_GRPC_PORT.get(settings); this.nettyEventLoopThreads = SETTING_GRPC_WORKER_COUNT.get(settings); + this.portSettingKey = SETTING_GRPC_PORT.getKey(); } - BoundTransportAddress boundAddress() { + // public for tests + @Override + public BoundTransportAddress getBoundAddress() { return this.boundAddress; } + /** + * Inject a NettyServerBuilder configuration to be applied at server bind and start. + * @param configModifier builder configuration to set. + */ + protected void addServerConfig(UnaryOperator configModifier) { + serverBuilderConfigs.add(configModifier); + } + /** * Starts the gRPC server transport. * Initializes the event loop group and binds the server to the configured addresses. @@ -210,7 +236,7 @@ protected void doStop() { */ @Override protected void doClose() { - + eventLoopGroup.close(); } private void bindServer() { @@ -242,7 +268,7 @@ private void bindServer() { + publishInetAddress + "). " + "Please specify a unique port by setting " - + SETTING_GRPC_PORT.getKey() + + portSettingKey + " or " + SETTING_GRPC_PUBLISH_PORT.getKey() ); @@ -261,13 +287,18 @@ private TransportAddress bindAddress(InetAddress hostAddress, PortsRange portRan try { final InetSocketAddress address = new InetSocketAddress(hostAddress, portNumber); - final NettyServerBuilder serverBuilder = NettyServerBuilder.forAddress(address, InsecureServerCredentials.create()) + final NettyServerBuilder serverBuilder = NettyServerBuilder.forAddress(address) + .directExecutor() .bossEventLoopGroup(eventLoopGroup) .workerEventLoopGroup(eventLoopGroup) .channelType(NioServerSocketChannel.class) .addService(new HealthStatusManager().getHealthService()) .addService(ProtoReflectionService.newInstance()); + for (UnaryOperator op : serverBuilderConfigs) { + op.apply(serverBuilder); + } + services.forEach(serverBuilder::addService); Server srv = serverBuilder.build().start(); diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/ssl/SecureNetty4GrpcServerTransport.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/ssl/SecureNetty4GrpcServerTransport.java new file mode 100644 index 0000000000000..14e7fc2d8b227 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/ssl/SecureNetty4GrpcServerTransport.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.transport.grpc.ssl; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; +import org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport; +import org.opensearch.plugins.SecureAuxTransportSettingsProvider; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLException; + +import java.security.NoSuchAlgorithmException; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Optional; + +import io.grpc.BindableService; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import io.grpc.netty.shaded.io.netty.handler.ssl.ApplicationProtocolConfig; +import io.grpc.netty.shaded.io.netty.handler.ssl.ApplicationProtocolNames; +import io.grpc.netty.shaded.io.netty.handler.ssl.ClientAuth; +import io.grpc.netty.shaded.io.netty.handler.ssl.JdkSslContext; +import io.grpc.netty.shaded.io.netty.handler.ssl.SupportedCipherSuiteFilter; + +/** + * Netty4GrpcServerTransport with TLS enabled. + * Security settings injected through a SecureAuxTransportSettingsProvider. + */ +public class SecureNetty4GrpcServerTransport extends Netty4GrpcServerTransport { + private static final String[] DEFAULT_SSL_PROTOCOLS = { "TLSv1.3", "TLSv1.2", "TLSv1.1" }; + + /** + * Type key to select secure transport. + */ + public static final String GRPC_SECURE_TRANSPORT_SETTING_KEY = "experimental-secure-transport-grpc"; + + /** + * Distinct port setting required as it depends on transport type key. + */ + public static final Setting SETTING_GRPC_SECURE_PORT = AUX_TRANSPORT_PORT.getConcreteSettingForNamespace( + GRPC_SECURE_TRANSPORT_SETTING_KEY + ); + + /** + * In the case no SecureAuxTransportParameters restrict client auth mode to REQUIRE. + * Assume no enabled cipher suites. Allow ssl context implementation to select defaults. + */ + private static class DefaultParameters implements SecureAuxTransportSettingsProvider.SecureAuxTransportParameters { + @Override + public Optional clientAuth() { + return Optional.of(ClientAuth.REQUIRE.name()); + } + + @Override + public Collection cipherSuites() { + return List.of(); + } + } + + /** + * Creates a new SecureNetty4GrpcServerTransport instance and inject a SecureAuxTransportSslContext + * into the NettyServerBuilder config to enable TLS on the server. + * @param settings the configured settings. + * @param services the gRPC compatible services to be registered with the server. + * @param networkService the bind/publish addresses. + * @param secureTransportSettingsProvider TLS configuration settings. 
+ */ + public SecureNetty4GrpcServerTransport( + Settings settings, + List services, + NetworkService networkService, + SecureAuxTransportSettingsProvider secureTransportSettingsProvider + ) { + super(settings, services, networkService); + this.port = SecureNetty4GrpcServerTransport.SETTING_GRPC_SECURE_PORT.get(settings); + this.portSettingKey = SecureNetty4GrpcServerTransport.SETTING_GRPC_SECURE_PORT.getKey(); + try { + JdkSslContext ctxt = getSslContext(settings, secureTransportSettingsProvider); + this.addServerConfig((NettyServerBuilder builder) -> builder.sslContext(ctxt)); + } catch (Exception e) { + throw new RuntimeException("Failed to build SslContext for " + SecureNetty4GrpcServerTransport.class.getName(), e); + } + } + + /** + * Construct JdkSslContext, wrapping javax SSLContext as supplied by SecureAuxTransportSettingsProvider with applied + * configurations settings in SecureAuxTransportParameters for this transport. + * If optional SSLContext is empty, use default context as configured through JDK. + * If SecureAuxTransportParameters empty, set ClientAuth OPTIONAL and allow all default supported ciphers. + * @param settings the configured settings. + * @param provider for SSLContext and SecureAuxTransportParameters (ClientAuth and enabled ciphers). + */ + private JdkSslContext getSslContext(Settings settings, SecureAuxTransportSettingsProvider provider) throws SSLException { + Optional sslContext = provider.buildSecureAuxServerTransportContext(settings, this); + if (sslContext.isEmpty()) { + try { + sslContext = Optional.of(SSLContext.getDefault()); + } catch (NoSuchAlgorithmException e) { + throw new SSLException("Failed to build default SSLContext for " + SecureNetty4GrpcServerTransport.class.getName(), e); + } + } + SecureAuxTransportSettingsProvider.SecureAuxTransportParameters params = provider.parameters().orElseGet(DefaultParameters::new); + ClientAuth clientAuth = ClientAuth.valueOf(params.clientAuth().orElseThrow().toUpperCase(Locale.ROOT)); + return new JdkSslContext( + sslContext.get(), + false, + params.cipherSuites(), + SupportedCipherSuiteFilter.INSTANCE, + new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + ApplicationProtocolNames.HTTP_2 // gRPC -> always http2 + ), + clientAuth, + DEFAULT_SSL_PROTOCOLS, + true + ); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/ssl/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/ssl/package-info.java new file mode 100644 index 0000000000000..bffc3e762a0f4 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/ssl/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * gRPC transport for OpenSearch implementing TLS. 
+ */ +package org.opensearch.transport.grpc.ssl; diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/GrpcPluginTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/GrpcPluginTests.java index 974602bce3278..31e6d9e25715c 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/GrpcPluginTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/GrpcPluginTests.java @@ -18,6 +18,7 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.client.Client; +import org.opensearch.transport.grpc.ssl.SecureNetty4GrpcServerTransport; import org.junit.Before; import java.util.List; @@ -34,6 +35,9 @@ import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST; import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT; import static org.opensearch.plugin.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT; +import static org.opensearch.plugin.transport.grpc.ssl.SecureSettingsHelpers.getServerClientAuthNone; +import static org.opensearch.transport.grpc.ssl.SecureNetty4GrpcServerTransport.GRPC_SECURE_TRANSPORT_SETTING_KEY; +import static org.opensearch.transport.grpc.ssl.SecureNetty4GrpcServerTransport.SETTING_GRPC_SECURE_PORT; public class GrpcPluginTests extends OpenSearchTestCase { @@ -87,6 +91,7 @@ public void testGetSettings() { // Verify that all expected settings are returned assertTrue("SETTING_GRPC_PORT should be included", settings.contains(SETTING_GRPC_PORT)); + assertTrue("SETTING_GRPC_SECURE_PORT should be included", settings.contains(SETTING_GRPC_SECURE_PORT)); assertTrue("SETTING_GRPC_HOST should be included", settings.contains(SETTING_GRPC_HOST)); assertTrue("SETTING_GRPC_PUBLISH_HOST should be included", settings.contains(SETTING_GRPC_PUBLISH_HOST)); assertTrue("SETTING_GRPC_BIND_HOST should be included", settings.contains(SETTING_GRPC_BIND_HOST)); @@ -94,7 +99,7 @@ public void testGetSettings() { assertTrue("SETTING_GRPC_PUBLISH_PORT should be included", settings.contains(SETTING_GRPC_PUBLISH_PORT)); // Verify the number of settings - assertEquals("Should return 6 settings", 6, settings.size()); + assertEquals("Should return 7 settings", 7, settings.size()); } public void testGetAuxTransports() { @@ -116,4 +121,25 @@ public void testGetAuxTransports() { NetworkPlugin.AuxTransport transport = transports.get(GRPC_TRANSPORT_SETTING_KEY).get(); assertTrue("Should return a Netty4GrpcServerTransport instance", transport instanceof Netty4GrpcServerTransport); } + + public void testGetSecureAuxTransports() { + Settings settings = Settings.builder().put(SETTING_GRPC_SECURE_PORT.getKey(), "9200-9300").build(); + + Map> transports = plugin.getSecureAuxTransports( + settings, + threadPool, + circuitBreakerService, + networkService, + clusterSettings, + getServerClientAuthNone(), + tracer + ); + + // Verify that the transport map contains the expected key + assertTrue("Should contain GRPC_SECURE_TRANSPORT_SETTING_KEY", transports.containsKey(GRPC_SECURE_TRANSPORT_SETTING_KEY)); + + // Verify that the supplier returns a Netty4GrpcServerTransport instance + NetworkPlugin.AuxTransport transport = transports.get(GRPC_SECURE_TRANSPORT_SETTING_KEY).get(); + assertTrue("Should return a SecureNetty4GrpcServerTransport instance", transport instanceof SecureNetty4GrpcServerTransport); + } } diff --git 
a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportTests.java index dcade2e8bf880..198b92dad672c 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/Netty4GrpcServerTransportTests.java @@ -12,6 +12,7 @@ import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.plugin.transport.grpc.ssl.NettyGrpcClient; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.MatcherAssert; import org.junit.Before; @@ -19,6 +20,7 @@ import java.util.List; import io.grpc.BindableService; +import io.grpc.health.v1.HealthCheckResponse; import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.not; @@ -38,13 +40,39 @@ public void testBasicStartAndStop() { try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(createSettings(), services, networkService)) { transport.start(); - MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); - assertNotNull(transport.boundAddress().publishAddress().address()); + MatcherAssert.assertThat(transport.getBoundAddress().boundAddresses(), not(emptyArray())); + assertNotNull(transport.getBoundAddress().publishAddress().address()); transport.stop(); } } + public void testGrpcTransportHealthcheck() { + try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(createSettings(), services, networkService)) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.getBoundAddress().boundAddresses()); + try (NettyGrpcClient client = new NettyGrpcClient.Builder().setAddress(remoteAddress).build()) { + assertEquals(client.checkHealth(), HealthCheckResponse.ServingStatus.SERVING); + } + transport.stop(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testGrpcTransportListServices() { + try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(createSettings(), services, networkService)) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.getBoundAddress().boundAddresses()); + try (NettyGrpcClient client = new NettyGrpcClient.Builder().setAddress(remoteAddress).build()) { + assertTrue(client.listServices().get().size() > 1); + } + transport.stop(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + public void testWithCustomPort() { // Create settings with a specific port Settings settings = Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), "9000-9010").build(); @@ -52,8 +80,8 @@ public void testWithCustomPort() { try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { transport.start(); - MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); - TransportAddress publishAddress = transport.boundAddress().publishAddress(); + MatcherAssert.assertThat(transport.getBoundAddress().boundAddresses(), not(emptyArray())); + TransportAddress publishAddress = transport.getBoundAddress().publishAddress(); assertNotNull(publishAddress.address()); assertTrue("Port should be in the specified range", publishAddress.getPort() >= 9000 && 
publishAddress.getPort() <= 9010); @@ -71,8 +99,8 @@ public void testWithCustomPublishPort() { try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { transport.start(); - MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); - TransportAddress publishAddress = transport.boundAddress().publishAddress(); + MatcherAssert.assertThat(transport.getBoundAddress().boundAddresses(), not(emptyArray())); + TransportAddress publishAddress = transport.getBoundAddress().publishAddress(); assertNotNull(publishAddress.address()); assertEquals("Publish port should match the specified value", 9000, publishAddress.getPort()); @@ -90,8 +118,8 @@ public void testWithCustomHost() { try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { transport.start(); - MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); - TransportAddress publishAddress = transport.boundAddress().publishAddress(); + MatcherAssert.assertThat(transport.getBoundAddress().boundAddresses(), not(emptyArray())); + TransportAddress publishAddress = transport.getBoundAddress().publishAddress(); assertNotNull(publishAddress.address()); assertEquals( "Host should match the specified value", @@ -113,8 +141,8 @@ public void testWithCustomBindHost() { try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { transport.start(); - MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); - TransportAddress boundAddress = transport.boundAddress().boundAddresses()[0]; + MatcherAssert.assertThat(transport.getBoundAddress().boundAddresses(), not(emptyArray())); + TransportAddress boundAddress = transport.getBoundAddress().boundAddresses()[0]; assertNotNull(boundAddress.address()); assertEquals( "Bind host should match the specified value", @@ -136,8 +164,8 @@ public void testWithCustomPublishHost() { try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { transport.start(); - MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); - TransportAddress publishAddress = transport.boundAddress().publishAddress(); + MatcherAssert.assertThat(transport.getBoundAddress().boundAddresses(), not(emptyArray())); + TransportAddress publishAddress = transport.getBoundAddress().publishAddress(); assertNotNull(publishAddress.address()); assertEquals( "Publish host should match the specified value", @@ -159,8 +187,8 @@ public void testWithCustomWorkerCount() { try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(settings, services, networkService)) { transport.start(); - MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); - assertNotNull(transport.boundAddress().publishAddress().address()); + MatcherAssert.assertThat(transport.getBoundAddress().boundAddresses(), not(emptyArray())); + assertNotNull(transport.getBoundAddress().publishAddress().address()); transport.stop(); } diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/NettyGrpcClient.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/NettyGrpcClient.java new file mode 100644 index 0000000000000..21e94a96a8285 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/NettyGrpcClient.java @@ -0,0 +1,168 @@ 
+/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.ssl; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.core.common.transport.TransportAddress; + +import javax.net.ssl.SSLException; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +import io.grpc.ManagedChannel; +import io.grpc.health.v1.HealthCheckRequest; +import io.grpc.health.v1.HealthCheckResponse; +import io.grpc.health.v1.HealthGrpc; +import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder; +import io.grpc.netty.shaded.io.netty.handler.ssl.ApplicationProtocolConfig; +import io.grpc.netty.shaded.io.netty.handler.ssl.ApplicationProtocolNames; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContextBuilder; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslProvider; +import io.grpc.netty.shaded.io.netty.handler.ssl.util.InsecureTrustManagerFactory; +import io.grpc.reflection.v1alpha.ServerReflectionGrpc; +import io.grpc.reflection.v1alpha.ServerReflectionRequest; +import io.grpc.reflection.v1alpha.ServerReflectionResponse; +import io.grpc.reflection.v1alpha.ServiceResponse; +import io.grpc.stub.StreamObserver; + +import static org.opensearch.plugin.transport.grpc.ssl.SecureSettingsHelpers.CLIENT_KEYSTORE; +import static org.opensearch.plugin.transport.grpc.ssl.SecureSettingsHelpers.getTestKeyManagerFactory; +import static io.grpc.internal.GrpcUtil.NOOP_PROXY_DETECTOR; + +public class NettyGrpcClient implements AutoCloseable { + private static final Logger logger = LogManager.getLogger(NettyGrpcClient.class); + private final ManagedChannel channel; + private final HealthGrpc.HealthBlockingStub healthStub; + private final ServerReflectionGrpc.ServerReflectionStub reflectionStub; + + public NettyGrpcClient(NettyChannelBuilder channelBuilder) { + channel = channelBuilder.build(); + healthStub = HealthGrpc.newBlockingStub(channel); + reflectionStub = ServerReflectionGrpc.newStub(channel); + } + + public void shutdown() throws InterruptedException { + channel.shutdown(); + if (!channel.awaitTermination(5, TimeUnit.SECONDS)) { + channel.shutdownNow(); // forced shutdown + if (!channel.awaitTermination(5, TimeUnit.SECONDS)) { + logger.warn("Unable to shutdown the managed channel gracefully"); + } + } + } + + @Override + public void close() throws Exception { + shutdown(); + } + + /** + * List available gRPC services available on server. + * Note: ProtoReflectionService only implements a streaming interface and has no blocking stub. + * @return services registered on the server. 
+ */ + public CompletableFuture> listServices() { + CompletableFuture> respServices = new CompletableFuture<>(); + + StreamObserver responseObserver = new StreamObserver<>() { + final List services = new ArrayList<>(); + + @Override + public void onNext(ServerReflectionResponse response) { + if (response.hasListServicesResponse()) { + services.addAll(response.getListServicesResponse().getServiceList()); + } + } + + @Override + public void onError(Throwable t) { + respServices.completeExceptionally(t); + throw new RuntimeException(t); + } + + @Override + public void onCompleted() { + respServices.complete(services); + } + }; + + StreamObserver requestObserver = reflectionStub.serverReflectionInfo(responseObserver); + requestObserver.onNext(ServerReflectionRequest.newBuilder().setListServices("").build()); + requestObserver.onCompleted(); + return respServices; + } + + /** + * Request server status. + * @return HealthCheckResponse.ServingStatus. + */ + public HealthCheckResponse.ServingStatus checkHealth() { + return healthStub.check(HealthCheckRequest.newBuilder().build()).getStatus(); + } + + public static class Builder { + private Boolean clientAuth = false; + private Boolean insecure = false; + private TransportAddress addr; + + private static final ApplicationProtocolConfig CLIENT_ALPN = new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + ApplicationProtocolNames.HTTP_2 + ); + + public Builder() {} + + public NettyGrpcClient build() throws SSLException { + NettyChannelBuilder channelBuilder = NettyChannelBuilder.forAddress(addr.getAddress(), addr.getPort()) + .proxyDetector(NOOP_PROXY_DETECTOR); + + if (clientAuth || insecure) { + SslContextBuilder builder = SslContextBuilder.forClient(); + builder.sslProvider(SslProvider.JDK); + builder.applicationProtocolConfig(CLIENT_ALPN); + if (clientAuth) { + builder.keyManager(getTestKeyManagerFactory(CLIENT_KEYSTORE)); + } + builder.trustManager(InsecureTrustManagerFactory.INSTANCE); + channelBuilder.sslContext(builder.build()); + } else { + channelBuilder.usePlaintext(); + } + + return new NettyGrpcClient(channelBuilder); + } + + public Builder setAddress(TransportAddress addr) { + this.addr = addr; + return this; + } + + /** + * Enable clientAuth - load client keystore. + */ + public Builder clientAuth(boolean enable) { + this.clientAuth = enable; + return this; + } + + /** + * Enable insecure TLS client. + */ + public Builder insecure(boolean enable) { + this.insecure = enable; + return this; + } + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransportTests.java new file mode 100644 index 0000000000000..1c841bf6f0d22 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransportTests.java @@ -0,0 +1,157 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.ssl; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.grpc.ssl.SecureNetty4GrpcServerTransport; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import io.grpc.BindableService; +import io.grpc.StatusRuntimeException; +import io.grpc.health.v1.HealthCheckResponse; + +import static org.opensearch.plugin.transport.grpc.ssl.SecureSettingsHelpers.ConnectExceptions.BAD_CERT; +import static org.opensearch.plugin.transport.grpc.ssl.SecureSettingsHelpers.getServerClientAuthNone; +import static org.opensearch.plugin.transport.grpc.ssl.SecureSettingsHelpers.getServerClientAuthOptional; +import static org.opensearch.plugin.transport.grpc.ssl.SecureSettingsHelpers.getServerClientAuthRequired; + +public class SecureNetty4GrpcServerTransportTests extends OpenSearchTestCase { + private NetworkService networkService; + private final List services = new ArrayList<>(); + + static Settings createSettings() { + return Settings.builder().put(SecureNetty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), getPortRange()).build(); + } + + @Before + public void setup() { + networkService = new NetworkService(Collections.emptyList()); + } + + @After + public void shutdown() { + networkService = null; + } + + public void testGrpcSecureTransportStartStop() { + try ( + SecureNetty4GrpcServerTransport transport = new SecureNetty4GrpcServerTransport( + createSettings(), + services, + networkService, + getServerClientAuthNone() + ) + ) { + transport.start(); + assertTrue(transport.getBoundAddress().boundAddresses().length > 0); + assertNotNull(transport.getBoundAddress().publishAddress().address()); + transport.stop(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testGrpcInsecureAuthTLS() { + try ( + SecureNetty4GrpcServerTransport transport = new SecureNetty4GrpcServerTransport( + createSettings(), + services, + networkService, + getServerClientAuthNone() + ) + ) { + transport.start(); + assertTrue(transport.getBoundAddress().boundAddresses().length > 0); + assertNotNull(transport.getBoundAddress().publishAddress().address()); + final TransportAddress remoteAddress = randomFrom(transport.getBoundAddress().boundAddresses()); + + // Client without cert + NettyGrpcClient client = new NettyGrpcClient.Builder().setAddress(remoteAddress).insecure(true).build(); + assertEquals(client.checkHealth(), HealthCheckResponse.ServingStatus.SERVING); + client.close(); + + transport.stop(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testGrpcOptionalAuthTLS() { + try ( + SecureNetty4GrpcServerTransport transport = new SecureNetty4GrpcServerTransport( + createSettings(), + services, + networkService, + getServerClientAuthOptional() + ) + ) { + transport.start(); + assertTrue(transport.getBoundAddress().boundAddresses().length > 0); + assertNotNull(transport.getBoundAddress().publishAddress().address()); + final TransportAddress remoteAddress = randomFrom(transport.getBoundAddress().boundAddresses()); + + // Client without cert + NettyGrpcClient hasNoCertClient = new NettyGrpcClient.Builder().setAddress(remoteAddress).insecure(true).build(); + assertEquals(hasNoCertClient.checkHealth(), HealthCheckResponse.ServingStatus.SERVING); 
+ hasNoCertClient.close(); + + // Client with trusted cert + NettyGrpcClient hasTrustedCertClient = new NettyGrpcClient.Builder().setAddress(remoteAddress).clientAuth(true).build(); + assertEquals(hasTrustedCertClient.checkHealth(), HealthCheckResponse.ServingStatus.SERVING); + hasTrustedCertClient.close(); + + transport.stop(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testGrpcRequiredAuthTLS() { + try ( + SecureNetty4GrpcServerTransport transport = new SecureNetty4GrpcServerTransport( + createSettings(), + services, + networkService, + getServerClientAuthRequired() + ) + ) { + transport.start(); + assertTrue(transport.getBoundAddress().boundAddresses().length > 0); + assertNotNull(transport.getBoundAddress().publishAddress().address()); + final TransportAddress remoteAddress = randomFrom(transport.getBoundAddress().boundAddresses()); + + // Client without cert + NettyGrpcClient hasNoCertClient = new NettyGrpcClient.Builder().setAddress(remoteAddress).insecure(true).build(); + assertThrows(StatusRuntimeException.class, hasNoCertClient::checkHealth); + try { + hasNoCertClient.checkHealth(); + } catch (Exception e) { + assertEquals(SecureSettingsHelpers.ConnectExceptions.get(e), BAD_CERT); + } + hasNoCertClient.close(); + + // Client with trusted cert + NettyGrpcClient hasTrustedCertClient = new NettyGrpcClient.Builder().setAddress(remoteAddress).clientAuth(true).build(); + assertEquals(hasTrustedCertClient.checkHealth(), HealthCheckResponse.ServingStatus.SERVING); + hasTrustedCertClient.close(); + + transport.stop(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureSettingsHelpers.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureSettingsHelpers.java new file mode 100644 index 0000000000000..955194ae7e5f1 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureSettingsHelpers.java @@ -0,0 +1,169 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.ssl; + +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureAuxTransportSettingsProvider; +import org.opensearch.transport.grpc.ssl.SecureNetty4GrpcServerTransport; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; + +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Optional; + +import io.grpc.netty.shaded.io.netty.handler.ssl.ClientAuth; +import io.grpc.netty.shaded.io.netty.handler.ssl.util.InsecureTrustManagerFactory; + +import static org.opensearch.test.OpenSearchTestCase.randomFrom; + +public class SecureSettingsHelpers { + private static final String TEST_PASS = "password"; // used for all keystores + static final String SERVER_KEYSTORE = "/netty4-server-secure.jks"; + static final String CLIENT_KEYSTORE = "/netty4-client-secure.jks"; + static final String[] DEFAULT_SSL_PROTOCOLS = { "TLSv1.3", "TLSv1.2", "TLSv1.1" }; + static final String[] DEFAULT_CIPHERS = { + "TLS_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" }; + + /** + * Exception messages for various types of TLS client/server connection failure. + * We would like to check to ensure a connection fails in the way we expect. + * However, depending on the default JDK provider exceptions may differ slightly, + * so we allow a couple different error messages for each possible error. 
+ */ + public enum ConnectExceptions { + NONE(List.of("Connection succeeded")), + UNAVAILABLE(List.of("Network closed for unknown reason")), + BAD_CERT(List.of("bad_certificate", "certificate_required")); + + List msgList = null; + + ConnectExceptions(List exceptionMsg) { + this.msgList = exceptionMsg; + } + + public static ConnectExceptions get(Throwable e) { + if (e.getMessage() != null) { + for (ConnectExceptions exception : values()) { + if (exception == NONE) continue; // Skip success message + if (exception.msgList.stream().anyMatch(substring -> e.getMessage().contains(substring))) { + return exception; + } + } + } + if (e.getCause() != null) { + return get(e.getCause()); + } + throw new RuntimeException("Unexpected exception", e); + } + } + + public static KeyManagerFactory getTestKeyManagerFactory(String keystorePath) { + KeyManagerFactory keyManagerFactory; + try { + final KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); + keyStore.load(SecureNetty4GrpcServerTransport.class.getResourceAsStream(keystorePath), TEST_PASS.toCharArray()); + keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + keyManagerFactory.init(keyStore, TEST_PASS.toCharArray()); + } catch (UnrecoverableKeyException | CertificateException | KeyStoreException | IOException | NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + return keyManagerFactory; + } + + static TrustManagerFactory getTestTrustManagerFactory(String keystorePath) { + try { + final KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType()); + trustStore.load(SecureNetty4GrpcServerTransport.class.getResourceAsStream(keystorePath), TEST_PASS.toCharArray()); + TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + trustManagerFactory.init(trustStore); + return trustManagerFactory; + } catch (KeyStoreException | CertificateException | NoSuchAlgorithmException | IOException e) { + throw new RuntimeException(e); + } + } + + static SecureAuxTransportSettingsProvider getSecureSettingsProvider( + String clientAuth, + KeyManagerFactory keyMngerFactory, + TrustManagerFactory trustMngerFactory + ) { + return new SecureAuxTransportSettingsProvider() { + @Override + public Optional buildSecureAuxServerTransportContext(Settings settings, NetworkPlugin.AuxTransport transport) + throws SSLException { + // Choose a random protocol from among supported test defaults + String protocol = randomFrom(DEFAULT_SSL_PROTOCOLS); + // Default JDK provider + SSLContext testContext; + try { + testContext = SSLContext.getInstance(protocol); + testContext.init(keyMngerFactory.getKeyManagers(), trustMngerFactory.getTrustManagers(), new SecureRandom()); + } catch (NoSuchAlgorithmException | KeyManagementException e) { + throw new SSLException("Failed to build mock provider", e); + } + return Optional.of(testContext); + } + + @Override + public Optional parameters() { + return Optional.of(new SecureAuxTransportParameters() { + @Override + public Optional clientAuth() { + return Optional.of(clientAuth); + } + + @Override + public Collection cipherSuites() { + return List.of(DEFAULT_CIPHERS); + } + }); + } + }; + } + + public static SecureAuxTransportSettingsProvider getServerClientAuthRequired() { + return getSecureSettingsProvider( + ClientAuth.REQUIRE.name().toUpperCase(Locale.ROOT), + getTestKeyManagerFactory(SERVER_KEYSTORE), + getTestTrustManagerFactory(CLIENT_KEYSTORE) + ); + } + + public static 
SecureAuxTransportSettingsProvider getServerClientAuthOptional() { + return getSecureSettingsProvider( + ClientAuth.OPTIONAL.name().toUpperCase(Locale.ROOT), + getTestKeyManagerFactory(SERVER_KEYSTORE), + getTestTrustManagerFactory(CLIENT_KEYSTORE) + ); + } + + public static SecureAuxTransportSettingsProvider getServerClientAuthNone() { + return getSecureSettingsProvider( + ClientAuth.NONE.name().toUpperCase(Locale.ROOT), + getTestKeyManagerFactory(SERVER_KEYSTORE), + InsecureTrustManagerFactory.INSTANCE + ); + } +} diff --git a/plugins/transport-grpc/src/test/resources/README.txt b/plugins/transport-grpc/src/test/resources/README.txt new file mode 100644 index 0000000000000..d2315aea07404 --- /dev/null +++ b/plugins/transport-grpc/src/test/resources/README.txt @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# +# This is README describes how the certificates in this directory were created. +# This file can also be executed as a script +# + +# 1. Create server & client certificate key + +openssl req -x509 -sha256 -newkey rsa:2048 -keyout server.key -out server.crt -days 8192 -nodes +openssl req -x509 -sha256 -newkey rsa:2048 -keyout client.key -out client.crt -days 8192 -nodes + +# 2. Export the certificates in pkcs12 format + +openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12 -name netty4-server-secure -password pass:password +openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12 -name netty4-client-secure -password pass:password + +# 3. Import the certificate into JDK keystore (PKCS12 type) + +keytool -importkeystore -srcstorepass password -destkeystore netty4-server-secure.jks -srckeystore server.p12 -srcstoretype PKCS12 -alias netty4-server-secure -deststorepass password +keytool -importkeystore -srcstorepass password -destkeystore netty4-client-secure.jks -srckeystore client.p12 -srcstoretype PKCS12 -alias netty4-client-secure -deststorepass password + +# 4. Clean up - Clean up pkcs12 keystores and private keys +rm client.key +rm client.p12 +rm server.key +rm server.p12 diff --git a/plugins/transport-grpc/src/test/resources/netty4-client-secure.jks b/plugins/transport-grpc/src/test/resources/netty4-client-secure.jks new file mode 100644 index 0000000000000000000000000000000000000000..3497de56fc956710a82778710bff9995eb25042d GIT binary patch literal 2772 zcma);c{CJk8^&jt8H^dbA{m3MS!V2%W$a5NX^PNTChJ(z$QsR9LSsvkb&!4Q%@)}v zM%GZM5TWc@k|f*fJKuMD&-d5&o##B~x$oa~U-zHac~B(i5Cf1AMS=!HSfrzkqj%sy zCLomr^#qZiuE*FJMFRK!mjr$ZB7qx@an13qhOqv}1%m<^s3fq?F+7jD`NsizX7zO0XdL4tz;h(y64%>UlVzyb!K1R*TAXk#Fr z5d@S5ais2LofSdAJZP403<7Mod_W}7&}9#48!iw`SSzz`(zZ;R3CJkOPIB3G--RqBKz-SyUs#TSj8XPVl=eO; zjO(mwoK(=!Uy3Omv#&Awd7T$|#>?_|gEj;IUVR;6Kgf;5u z!oJKoebBpj=~Zu>8+F7cWoJ>tr9+U$sa1TH&Ob1+{|hb-%=I<*V5;&Cu8F91XKXnS zb&e67YP`jCUGS#3R6~W1xYcFezBn=Z3A+CZbTr!LW2|vM4Z{hY4_C=|r>a?1Pks&N z6>G8H*Q~0$K|V(X4YO0*D|BByxpPf_p;M^EI=RY>ytoAUKy#!#)%um=j;3{ME6&VF zp8~_k%I$q6CknAT=gZVC)g#mJmE~C+`MuM&$l`5gLs?8KCb%_8Ja2Mw9N`sT@NT9s zI>C59wO^p#d`;?v%S>2k=Pu3`xUw9QGBmquHm72O4(Dr;*yc0&0iR_>KW7t+3bYrS z+dk)=#gOg$oBgjIgXEq5AF`Mmb$uf3O0iLm!eJG10#{Avat8LY6C2lxxX`}S_ZjbU z%!nC71Vsr|awN+#phyvRw`sX|28qR!4bKVQ)n-lSDB+4Bm{P+vT) z`c#AXj2e~fC{NrNu;O<59FR;as{92Hw8A&FbWI<`>Zs5!fA+cPaO&|3h+SlIVnLJb z;lnm2AI+|nT>H7jm06K&bFxmn01%LYRh%lR+yti99#T_2E;bd=e)E3zkdmB>6`4PX z5ZJeVp4TE!=I(&%`U~+X6gh1ypfz1bW)fxBsK>ZyOiJ7GjWHk=%ytAJnUnMg>dh?z zskx{Y=XyBH7Av~|ys4E3004+T)t>3$5~gBCm7F+fI16ichcu*!%7CG2yVb zq{)I^Zk?2hQ>1vxb+(*AJyWd8TQ!SB9zI-l{rhg6pKmZ)k7`d^G;Ocu_Z9zyv8k6# zkp0$KuCaxreGYkCet3@|*OSuU>87!QbIlEgwM2LNJ$oDkq;cN`r8rJxO#CDcf! 
zY60851@{Tgh#npLgRTc-Ep0}e9{}h#&3dm2&fEN0kikZnS0B|BKeNJsm1**itBo~w z*WUyTS=dKCbCT^V(e7{~`nTPeNA675H^0>X`kf`6EfURat@IN|O)X2H4`Cg{xh)HC zF?z`~s`aV5Yia0_R`&%nB2Ee|WgxW}znvHQ5ODv=ING!7oWLEca3S(xlP*b@7FqN1 zO3^3_JwwikSw-xG;Pc8WNj<9HgNHpog=1>;E#NgBPR6I;=|DfB+-Y;1+>5vq79FX= zVT#9aA+aP_Bv}U zF`sO z)t+bTR<=jbb|}?9amm7imPSYdya9N??c);!Py)yToB>_{_haIHOk@H6$CWdH2)G5n zqlEu4@}hY_T-NRcJQA&{q=-VPDXJ=~DJY{zpyWS8Ob{vwL^?)y8GwM}=gvP0@P9@4 zcoSlOTV4+v^|3r9U^AZ(@f!!`g#ET|$a*9TfK!C+QP?*d%Wt@p% zhlgC-nU|4rdRkrU>iNYYk|i7^E3yvM=Wak4-QfJdACu7}Exa_}eseOKdtUuq(-idM zp8m=VTdS{&8QVJFvVp-+^y%-0p)cx(l#+s*D_d^Yp1h!in2NBkF1Kekkqdj9mkQik zt*s=n8%LjQ7GBRMRD9u1LA*kgh@D z&L#H>iGh0byumyop^kRv30$w!Xh2x$+f^Q5i^bF^$OQ2${9>o|q=;Tm`tCWER#ry` zBCJu}X9+-V!72=fm`cv~D}Bnk@HWv_9@0PNVlI}fq>s!T1S7S1rgqw2~GdJ^q%9X?X-HFrD zH}c(S;p^QT4*a2YvG0kh+uCRK=+wAKWv@?lA~p>(ki(h_4SDQ@E9z34b-qp{KCk!E z^D%Tu(X9!NVla7o)fAWIz4qAYc(K_~(vdR&?zLrkvl=h`#h zy7=*NA^`z;Tu~29qX|ZDOT6*j?$0Gkj1hN34zQz*D~b($oYPBBiYiOT)tI@sUe7`_ zx%{E=Lcig%fhGhQ`pW^mi*5H_KY|iZb({=AZb5G+@^arBb4ZJnHiImyL=5g7G0!PI zI`_46_Q_8nHw&=0UmH2sKAlC7XaoJRq8solcUfS8BhXl)o1<(5;-bJ_T1Rpbg={|R zGFr{Fh5G)=>4EH$!gLA^zr}M#RcJ6JPKC}(5p4Y5F^jF~C#e&%0#K2qYPYKqmn|P0 zl#JV;M|n#}xt*nAOv^30myDR!hBo^QupKPwv>-*hw{NVSc0`s)%Z{2EIvMTw+lgNL z-XH)(e&gVNl)+U>EfGR|I-BMp#=5d!>r~UtL)=;Swh*x01N@W-Md#KBUGl7vMYZ6o zF}C72VG6hUJJNdg$Qi=uK9ZnRyTa0c|GHrnb*3iO3t)HemGdUQla}IZ)4_;>t=e! zZ$*u8qU=NieJMsGCBNXoVzJ%w!)(UWrQJTf3jHNtZsfsJU@J0JuCLOD;}&}-HZmN6 zr43T0zjP#PmQPc7G@1)i&GM&ACKB{g9;3`rXcYXw9 yZ+>sN@$N+?`*rn!n}YWoIy5?xKs@5Qg|d>nJRP$WgY|plTkQeS<{tlwpuYjS`{Ypo literal 0 HcmV?d00001 diff --git a/plugins/transport-grpc/src/test/resources/netty4-server-secure.jks b/plugins/transport-grpc/src/test/resources/netty4-server-secure.jks new file mode 100644 index 0000000000000000000000000000000000000000..5e7d09ded52d076756d67c486dcc3f928b4e4695 GIT binary patch literal 2772 zcma);XHXN`7KKyky%+>S4^jkz^rDD`CLn4kp(+uiOA8T*lz>PC>C&Wxpb+U&M0zKY zUX&szEg&EuhzN@1^4@*po%id#IdjgL_3g9vzrE%l2`nQZAQ(wt31MKCi!qAXX9qF@ zNdy*82!Z9+5yl`1(7yjFL7N~1X#EkcIog#Btp9Pbu>e6N0`%+=)J3}faX{ITXr$&p zk_{;j;getbu?>CPW=X~tM$WqVJbW}D9R>oO-~s`~kZcT0e|H2iLjg!Q1M~G5BcKZy z0+fSrHLVHr8VMa|DdZIGp4c*}f)F4h6^l}+v`>PtHOjIrs;i1Rg4-oF0A7=YalYO7k|YyUw4fibNm1mB2?l0q@HyKoQlcPY47Wu zIZ2l!dVd<2m8wM*Kki6Y@vxvPxR6q$!DOtOD+;9q3N4jNG-3@$&crZUo$H4ILQEc`|!VTP^FoYx)r zb`13F-9xBBik9V$EYHsehT~Jy;n5_+E+YL_Pi}Ri6W3(4SclMIf?{sDn&gNy+Q9IX zR`j}wHCN^##RWe5_+mJ##app$TdL{&JRF#vytnpCseCrTkm9$$sEKZB@e)M!mn;dBUa)Ya0~-SAcP zMpK8xv&S{VA;aWZ5zaa#D&-o$xdtzBkr5d$J%~($p(Em+E#C+c2UqLFEKO?rAJ@3W z@g`E9F3@$WvO~ZqO$hvD_vy6ww)5fCKFtnyv0pCk1Ppnrd;`FzA1pYP9G_Hn{=&Dg zwZYKPJP?FMAyEp}JO*rjCU?~|+gz`V|NWhP58Fv)L4!?J&iebr)$uk0!q4Db)a(rN z{Byj_cb)`Je~z)~W`;{z3UduR+-LjMJ76x#uUjd$cha}MerhTgfzP%023FkTE`jHLC#&~Scns+Ed zt@DP*wHS9f4}pb`1v_hNBaLOH4w4NdtL3ys<8p{rKda2<)4-(cLKc4jU+C)9-Y%WS zl1Uwb%;qs3!d!DZ|C?`jK5K@~!-o-d=DWoZDhMxO(`tW{$vIj53`M%_VfpW!%?!QF zJZ(cZJKlZKqPe1Pn$UY~J}DPY49l7fBxNPN{wR7CPM8lWm}mxup5P|$Rpz3Y7VK6>U%hKdSLAWr5TgyrayfZ z{M2w(!)sb~RnOfSGL1DubhU0yj}O`8%5>a=*A+qyX!pm#y9f# zW6&$_D#xF2a7Ow$*nMdO6Zkgs1K^w@CK*pmR_F`%zwyjl8`tDVXB!FYYx}QQWl|fp zjDNvm^f8yXGFWMtCz=;V4y-y&Ya<&xwfoYY_?|w`;V}IEvkZaM_ z_#d&T?ke*cWqYK?pSWb^QWVD#|B3LDreHKVm*^ya_U3Me#}$JJ0t6tg*Z8K`>L*G!Euujv z0~z1%U6~9WK4dQ@`Op6f8Y&2@GDsxm99!{Sq^DDu9E&n;xZN$1O7gOl&QsHFKJg^oOak^nO2W+5W|A_adm;0 z@r!nKCSNwDGJ>h`?9Fvuyn*D_t)i7Gmcpf!xp&sDO@AbBW+$c#{vqCdJlEeK zBU{F4C{=RRwwZRhe)9G6F5IYKy_7<@9sHOk8sed{>^O#7Eo-`q%a?6Az&vi3q(KnN zKGim)MB7?V%gC)=GhsFOjRzAp3MPuZJ()0ob;YC2UXkH0Ylw6TlT1sSLPCj4$yavq z^X@~Hm?fb!YCw^&Q5~Zx+_8Y9ZIYE*m^vz1bKN)!U*NAyH^gGB>G%%IQyYioRio4J 
z!dD4AZgxK?0TDwl1|qMr$5I!=H}ADeM+NNOx6yq7mGa%);#>05v88h1nVS=|*PN3LzO)wjN!=+mHWxjPN`lt3C&&dqA z?F`6yxr%zf{+w9PA=yP|oiinl?csCe@mt=kW1lZionBMW7uIDag>ccPRF1zHq( secureAuxTransportSettingsProviders = secureSettingsFactories.stream() + .map(p -> p.getSecureAuxTransportSettingsProvider(settings)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + + if (secureAuxTransportSettingsProviders.size() > 1) { + throw new IllegalArgumentException( + "there is more than one secure auxiliary transport settings provider: " + secureAuxTransportSettingsProviders + ); + } + for (NetworkPlugin plugin : plugins) { Map> httpTransportFactory = plugin.getHttpTransports( settings, @@ -274,6 +287,24 @@ public NetworkModule( } } + // Register any secure auxiliary transports if available + if (secureAuxTransportSettingsProviders.isEmpty() == false) { + final SecureAuxTransportSettingsProvider secureSettingProvider = secureAuxTransportSettingsProviders.iterator().next(); + + final Map> secureAuxTransportFactory = plugin.getSecureAuxTransports( + settings, + threadPool, + circuitBreakerService, + networkService, + clusterSettings, + secureSettingProvider, + tracer + ); + for (Map.Entry> entry : secureAuxTransportFactory.entrySet()) { + registerAuxTransport(entry.getKey(), entry.getValue()); + } + } + // Register any secure transports if available if (secureTransportSettingsProviders.isEmpty() == false) { final SecureTransportSettingsProvider secureSettingProvider = secureTransportSettingsProviders.iterator().next(); diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index 4442189373c93..b294c64e5cdce 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -42,6 +42,7 @@ import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpServerTransport; @@ -75,6 +76,7 @@ public interface NetworkPlugin { * bootstrap. To allow pluggable AuxTransports access to configurable port ranges we require the port range be provided * through an {@link org.opensearch.common.settings.Setting.AffixSetting} of the form 'AUX_SETTINGS_PREFIX.{aux-transport-key}.ports'. */ + @ExperimentalApi abstract class AuxTransport extends AbstractLifecycleComponent { public static final String AUX_SETTINGS_PREFIX = "aux.transport."; public static final String AUX_TRANSPORT_TYPES_KEY = AUX_SETTINGS_PREFIX + "types"; @@ -91,6 +93,9 @@ abstract class AuxTransport extends AbstractLifecycleComponent { Function.identity(), Setting.Property.NodeScope ); + + // public for tests + public abstract BoundTransportAddress getBoundAddress(); } /** @@ -159,6 +164,23 @@ default Map> getHttpTransports( return Collections.emptyMap(); } + /** + * Returns a map of secure {@link AuxTransport} suppliers. + * See {@link org.opensearch.plugins.NetworkPlugin.AuxTransport#AUX_TRANSPORT_TYPES_SETTING} to configure a specific implementation. 
+     */
+    @ExperimentalApi
+    default Map<String, Supplier<AuxTransport>> getSecureAuxTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        CircuitBreakerService circuitBreakerService,
+        NetworkService networkService,
+        ClusterSettings clusterSettings,
+        SecureAuxTransportSettingsProvider secureAuxTransportSettingsProvider,
+        Tracer tracer
+    ) {
+        return Collections.emptyMap();
+    }
+
     /**
      * Returns a map of secure {@link Transport} suppliers.
      * See {@link org.opensearch.common.network.NetworkModule#TRANSPORT_TYPE_KEY} to configure a specific implementation.
diff --git a/server/src/main/java/org/opensearch/plugins/SecureAuxTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureAuxTransportSettingsProvider.java
new file mode 100644
index 0000000000000..6274807f12149
--- /dev/null
+++ b/server/src/main/java/org/opensearch/plugins/SecureAuxTransportSettingsProvider.java
@@ -0,0 +1,52 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugins;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.settings.Settings;
+
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLException;
+
+import java.util.Collection;
+import java.util.Optional;
+
+/**
+ * A security settings provider for auxiliary transports.
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public interface SecureAuxTransportSettingsProvider {
+    /**
+     * Fetch an SSLContext as managed by pluggable security provider.
+     * @return an instance of SSLContext.
+     */
+    default Optional<SSLContext> buildSecureAuxServerTransportContext(Settings settings, NetworkPlugin.AuxTransport transport)
+        throws SSLException {
+        return Optional.empty();
+    }
+
+    /**
+     * Additional params required for configuring ALPN.
+     * @return an instance of {@link SecureAuxTransportSettingsProvider.SecureAuxTransportParameters}
+     */
+    default Optional<SecureAuxTransportParameters> parameters() {
+        return Optional.empty();
+    }
+
+    /**
+     * ALPN configuration parameters.
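+     * For example, a provider wired to a static TLS configuration might surface values along these lines
+     * (illustrative values only, not defaults mandated by this interface):
+     *
+     *   clientAuth()   -> Optional.of("REQUIRE")
+     *   cipherSuites() -> List.of("TLS_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")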
+ */ + @ExperimentalApi + interface SecureAuxTransportParameters { + Optional clientAuth(); + + Collection cipherSuites(); + } +} diff --git a/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java b/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java index ec2276ecc62ef..0fdf4b6927eb0 100644 --- a/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java +++ b/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java @@ -33,4 +33,11 @@ public interface SecureSettingsFactory { * @return optionally, the instance of the {@link SecureHttpTransportSettingsProvider} */ Optional getSecureHttpTransportSettingsProvider(Settings settings); + + /** + * Creates (or provides pre-created) instance of the {@link SecureAuxTransportSettingsProvider} + * @param settings settings + * @return optionally, the instance of the {@link SecureAuxTransportSettingsProvider} + */ + Optional getSecureAuxTransportSettingsProvider(Settings settings); } diff --git a/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java b/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java index 36dbd5a699b40..7e39445b1699c 100644 --- a/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java +++ b/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java @@ -32,7 +32,7 @@ public interface TransportAdapterProvider { * Provides a new transport adapter of required transport adapter class and transport instance. * @param transport adapter class * @param settings settings - * @param transport HTTP transport instance + * @param transport transport instance * @param adapterClass required transport adapter class * @return the non-empty {@link Optional} if the transport adapter could be created, empty one otherwise */ diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index 447377e372e61..c07fa0e183c00 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -47,6 +47,7 @@ import org.opensearch.http.HttpStats; import org.opensearch.http.NullDispatcher; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureAuxTransportSettingsProvider; import org.opensearch.plugins.SecureHttpTransportSettingsProvider; import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SecureTransportSettingsProvider; @@ -130,6 +131,12 @@ public Optional buildHttpServerExceptionHandler( } }); } + + @Override + public Optional getSecureAuxTransportSettingsProvider(Settings settings) { + return Optional.of(new SecureAuxTransportSettingsProvider() { + }); + } }; } From 8964f63653fadd5363267e0ef2edf2dd7bfe0105 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 10 Apr 2025 09:02:16 -0400 Subject: [PATCH 189/550] Fix FileInterceptor's delete calls for Unix Domain Sockets on Windows (#17873) * Fix FileInterceptor's delete calls for Unix Domain Sockets on Windows Signed-off-by: Andriy Redko * Added NetPermission check for UNIX Domain Socket deletion on Windows Signed-off-by: Andriy Redko --------- Signed-off-by: Andriy Redko --- .../opensearch/javaagent/FileInterceptor.java | 94 +++++++++++-------- 1 file changed, 53 insertions(+), 41 deletions(-) diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java 
b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java index e1ceddee23ca4..f17cfccd8d86f 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java @@ -12,6 +12,7 @@ import java.io.FilePermission; import java.lang.reflect.Method; +import java.net.NetPermission; import java.nio.file.OpenOption; import java.nio.file.Path; import java.nio.file.Paths; @@ -71,59 +72,70 @@ public static void intercept(@Advice.AllArguments Object[] args, @Advice.Origin boolean isMutating = name.equals("move") || name.equals("write") || name.startsWith("create"); final boolean isDelete = isMutating == false ? name.startsWith("delete") : false; - String targetFilePath = null; - if (isMutating == false && isDelete == false) { - if (name.equals("newByteChannel") == true || name.equals("open") == true) { - if (args.length > 1 && args[1] instanceof OpenOption[] opts) { - for (final OpenOption opt : opts) { - if (opt != StandardOpenOption.READ) { - isMutating = true; - break; - } - } - - } - } else if (name.equals("copy") == true) { - if (args.length > 1 && args[1] instanceof String pathStr) { - targetFilePath = Paths.get(pathStr).toAbsolutePath().toString(); - } else if (args.length > 1 && args[1] instanceof Path path) { - targetFilePath = path.toAbsolutePath().toString(); + // This is Windows implementation of UNIX Domain Sockets (close) + if (isDelete == true + && walker.getCallerClass().getName().equalsIgnoreCase("sun.nio.ch.PipeImpl$Initializer$LoopbackConnector") == true) { + final NetPermission permission = new NetPermission("accessUnixDomainSocket"); + for (ProtectionDomain domain : callers) { + if (!policy.implies(domain, permission)) { + throw new SecurityException("Denied access to: " + filePath + ", domain " + domain); } } - } + } else { + String targetFilePath = null; + if (isMutating == false && isDelete == false) { + if (name.equals("newByteChannel") == true || name.equals("open") == true) { + if (args.length > 1 && args[1] instanceof OpenOption[] opts) { + for (final OpenOption opt : opts) { + if (opt != StandardOpenOption.READ) { + isMutating = true; + break; + } + } - // Check each permission separately - for (final ProtectionDomain domain : callers) { - // Handle FileChannel.open() separately to check read/write permissions properly - if (method.getName().equals("open")) { - if (isMutating == true && !policy.implies(domain, new FilePermission(filePath, "read,write"))) { - throw new SecurityException("Denied OPEN (read/write) access to file: " + filePath + ", domain: " + domain); - } else if (!policy.implies(domain, new FilePermission(filePath, "read"))) { - throw new SecurityException("Denied OPEN (read) access to file: " + filePath + ", domain: " + domain); + } + } else if (name.equals("copy") == true) { + if (args.length > 1 && args[1] instanceof String pathStr) { + targetFilePath = Paths.get(pathStr).toAbsolutePath().toString(); + } else if (args.length > 1 && args[1] instanceof Path path) { + targetFilePath = path.toAbsolutePath().toString(); + } } } - // Handle Files.copy() separately to check read/write permissions properly - if (method.getName().equals("copy")) { - if (!policy.implies(domain, new FilePermission(filePath, "read"))) { - throw new SecurityException("Denied COPY (read) access to file: " + filePath + ", domain: " + domain); + // Check each permission separately + for (final ProtectionDomain domain : callers) { + // Handle 
FileChannel.open() separately to check read/write permissions properly + if (method.getName().equals("open")) { + if (isMutating == true && !policy.implies(domain, new FilePermission(filePath, "read,write"))) { + throw new SecurityException("Denied OPEN (read/write) access to file: " + filePath + ", domain: " + domain); + } else if (!policy.implies(domain, new FilePermission(filePath, "read"))) { + throw new SecurityException("Denied OPEN (read) access to file: " + filePath + ", domain: " + domain); + } } - if (targetFilePath != null) { - if (!policy.implies(domain, new FilePermission(targetFilePath, "write"))) { - throw new SecurityException("Denied COPY (write) access to file: " + targetFilePath + ", domain: " + domain); + // Handle Files.copy() separately to check read/write permissions properly + if (method.getName().equals("copy")) { + if (!policy.implies(domain, new FilePermission(filePath, "read"))) { + throw new SecurityException("Denied COPY (read) access to file: " + filePath + ", domain: " + domain); + } + + if (targetFilePath != null) { + if (!policy.implies(domain, new FilePermission(targetFilePath, "write"))) { + throw new SecurityException("Denied COPY (write) access to file: " + targetFilePath + ", domain: " + domain); + } } } - } - // File mutating operations - if (isMutating && !policy.implies(domain, new FilePermission(filePath, "write"))) { - throw new SecurityException("Denied WRITE access to file: " + filePath + ", domain: " + domain); - } + // File mutating operations + if (isMutating && !policy.implies(domain, new FilePermission(filePath, "write"))) { + throw new SecurityException("Denied WRITE access to file: " + filePath + ", domain: " + domain); + } - // File deletion operations - if (isDelete && !policy.implies(domain, new FilePermission(filePath, "delete"))) { - throw new SecurityException("Denied DELETE access to file: " + filePath + ", domain: " + domain); + // File deletion operations + if (isDelete && !policy.implies(domain, new FilePermission(filePath, "delete"))) { + throw new SecurityException("Denied DELETE access to file: " + filePath + ", domain: " + domain); + } } } } From 9db5e67b3ba819c977d2d767ae0b8b22ed7dd61c Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 10 Apr 2025 10:37:01 -0400 Subject: [PATCH 190/550] Fix security policy for Windows based builds and deployments (#17878) Signed-off-by: Andriy Redko --- .../resources/org/opensearch/bootstrap/security.policy | 7 +++++++ .../org/opensearch/bootstrap/test-framework.policy | 3 +++ 2 files changed, 10 insertions(+) diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index fbe0afb3c2a95..d51165898602f 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -91,30 +91,37 @@ grant codeBase "${codebase.zstd-jni}" { // repository-azure plugin and server side streaming grant codeBase "${codebase.reactor-core}" { permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase "${codebase.opensearch-cli}" { permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase "${codebase.opensearch-core}" { permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase 
"${codebase.jackson-core}" { permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase "${codebase.opensearch-common}" { permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase "${codebase.opensearch-x-content}" { permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase "${codebase.opensearch}" { permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; //// Everything else: diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index 5fe1a5b64e6c7..04af165708511 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -81,6 +81,7 @@ grant codeBase "${codebase.lucene-test-framework}" { permission java.nio.file.LinkPermission "hard"; // needed for RAMUsageTester permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase "${codebase.randomizedtesting-runner}" { @@ -92,6 +93,7 @@ grant codeBase "${codebase.randomizedtesting-runner}" { permission org.opensearch.secure_sm.ThreadPermission "modifyArbitraryThreadGroup"; // needed for TestClass creation permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; grant codeBase "${codebase.junit}" { @@ -176,4 +178,5 @@ grant { permission org.opensearch.secure_sm.ThreadContextPermission "stashAndMergeHeaders"; permission org.opensearch.secure_sm.ThreadContextPermission "stashWithOrigin"; permission java.lang.RuntimePermission "setDefaultUncaughtExceptionHandler"; + permission java.net.NetPermission "accessUnixDomainSocket"; }; From d29e95c0dbaf5716d128e0177e8151bba7dc959e Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 10 Apr 2025 13:47:20 -0400 Subject: [PATCH 191/550] Fix security policy for Windows based builds and deployments for transport plugins (#17882) Signed-off-by: Andriy Redko --- .../src/main/plugin-metadata/plugin-security.policy | 1 + .../src/main/plugin-metadata/plugin-security.policy | 1 + 2 files changed, 2 insertions(+) diff --git a/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy b/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy index 62cac9cda2a3e..8e700de5d7528 100644 --- a/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy +++ b/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy @@ -36,6 +36,7 @@ grant { // netty makes and accepts socket connections permission java.net.SocketPermission "*", "accept,connect"; + permission java.net.NetPermission "accessUnixDomainSocket"; // Netty sets custom classloader for some of its internal threads permission java.lang.RuntimePermission "*", "setContextClassLoader"; diff --git a/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy index 2b589d7518988..1efe9a490c23e 100644 --- a/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy +++ 
b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy @@ -12,6 +12,7 @@ grant { // netty makes and accepts socket connections permission java.net.SocketPermission "*", "accept,connect"; + permission java.net.NetPermission "accessUnixDomainSocket"; // Netty sets custom classloader for some of its internal threads permission java.lang.RuntimePermission "*", "setContextClassLoader"; From ead6e03ac5b84b0c1c535af26d0e4eeeb9c011aa Mon Sep 17 00:00:00 2001 From: guojialiang Date: Fri, 11 Apr 2025 02:28:25 +0800 Subject: [PATCH 192/550] [segment replication] Increase the default segment counter step size when replica promoting (#17568) * add segment counter increment step setting Signed-off-by: guojialiang * add changelog Signed-off-by: guojialiang * change the default value directly Signed-off-by: guojialiang * add CHANGELOG Signed-off-by: guojialiang --------- Signed-off-by: guojialiang --- CHANGELOG.md | 1 + .../java/org/opensearch/index/engine/NRTReplicationEngine.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa4591734896f..9bc7b66d47eb2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Unwrap singleton DocValues in date histogram aggregation. ([#17643](https://github.com/opensearch-project/OpenSearch/pull/17643)) - Introduce 512 byte limit to search and ingest pipeline IDs ([#17786](https://github.com/opensearch-project/OpenSearch/pull/17786)) - Avoid skewed segment replication lag metric ([#17831](https://github.com/opensearch-project/OpenSearch/pull/17831)) +- Increase the default segment counter step size when replica promoting ([#17568](https://github.com/opensearch-project/OpenSearch/pull/17568)) ### Dependencies - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607), [#17669](https://github.com/opensearch-project/OpenSearch/pull/17669)) diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 80e24fa0c5a7e..12c0fea42bb2f 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -67,7 +67,7 @@ public class NRTReplicationEngine extends Engine { private volatile long lastReceivedPrimaryGen = SequenceNumbers.NO_OPS_PERFORMED; - private static final int SI_COUNTER_INCREMENT = 10; + private static final int SI_COUNTER_INCREMENT = 100000; public NRTReplicationEngine(EngineConfig engineConfig) { super(engineConfig); From 88c7ed19a30312238d89b58ba9e7a72962914a74 Mon Sep 17 00:00:00 2001 From: Bo Zhang Date: Thu, 10 Apr 2025 11:29:30 -0700 Subject: [PATCH 193/550] Allow plugins to access/override some functions of ParametrizedFieldMapper; Modify the the constructor of the MappedFieldType to support FilterFieldType. 
(#17575) Signed-off-by: Bo Zhang --- .../java/org/opensearch/index/mapper/MappedFieldType.java | 2 +- .../opensearch/index/mapper/ParametrizedFieldMapper.java | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index 81065a88c3001..c6c89d8f981cb 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -101,7 +101,7 @@ public MappedFieldType( TextSearchInfo textSearchInfo, Map meta ) { - setBoost(1.0f); + this.boost = 1.0f; this.name = Objects.requireNonNull(name); this.isIndexed = isIndexed; this.isStored = isStored; diff --git a/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java index 4f4ec45001a54..927e5ba90fe64 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java @@ -95,7 +95,7 @@ protected ParametrizedFieldMapper(String simpleName, MappedFieldType mappedField public abstract ParametrizedFieldMapper.Builder getMergeBuilder(); @Override - public final ParametrizedFieldMapper merge(Mapper mergeWith) { + public ParametrizedFieldMapper merge(Mapper mergeWith) { if (mergeWith instanceof FieldMapper == false) { throw new IllegalArgumentException( @@ -348,7 +348,7 @@ private void merge(FieldMapper toMerge, Conflicts conflicts) { } } - protected void toXContent(XContentBuilder builder, boolean includeDefaults) throws IOException { + public void toXContent(XContentBuilder builder, boolean includeDefaults) throws IOException { if (serializerCheck.check(includeDefaults, isConfigured(), get())) { serializer.serialize(builder, name, getValue()); } @@ -649,7 +649,7 @@ protected String buildFullName(BuilderContext context) { /** * Writes the current builder parameter values as XContent */ - protected final void toXContent(XContentBuilder builder, boolean includeDefaults) throws IOException { + public final void toXContent(XContentBuilder builder, boolean includeDefaults) throws IOException { for (Parameter parameter : getParameters()) { parameter.toXContent(builder, includeDefaults); } From 11618fdeca8e255ac5117fcf1a773f959cdfece7 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Thu, 10 Apr 2025 12:21:15 -0700 Subject: [PATCH 194/550] Fix javaagent arg when running in IntelliJ (#17886) Taking the summary from my conversation with Claude to fix this: 1. We needed afterEvaluate to ensure the project was fully configured before accessing its properties 2. The correct path was in build/distributions rather than build/libs 3. We had to properly handle Gradle's Provider API when constructing the path 4. 
The jar name needed to include both the archivesName and the project version Signed-off-by: Michael Froh --- gradle/ide.gradle | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/gradle/ide.gradle b/gradle/ide.gradle index 4ea95e94d8f33..343ac03b8ed20 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -49,6 +49,10 @@ if (System.getProperty('idea.active') == 'true') { } } + buildScan { + server = 'https://127.0.0.1' + } + idea { project { vcs = 'Git' @@ -81,8 +85,14 @@ if (System.getProperty('idea.active') == 'true') { } runConfigurations { defaults(JUnit) { - vmParameters = '-ea -Djava.locale.providers=SPI,CLDR' - vmParameters += ' -javaagent:' + project(':libs:agent-sm:agent').jar.archiveFile.get() + project(':libs:agent-sm:agent').afterEvaluate { agentProject -> + vmParameters = '-ea -Djava.locale.providers=SPI,CLDR' + def jarName = "${agentProject.base.archivesName.get()}-${project.version}.jar" + vmParameters += ' -javaagent:' + agentProject.layout.buildDirectory + .dir('distributions') + .map { it.file(jarName) } + .get() + } } } copyright { From cd8fa4f14e713d5448c0779677b7f38f76c5dc42 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Thu, 10 Apr 2025 16:09:04 -0400 Subject: [PATCH 195/550] Bump org.apache.poi version from 5.2.5 to 5.4.1 in /plugins/ingest-attachment (#17887) * Upgrade org.apache.poi version from 5.2.5 to 5.4.1 Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + plugins/ingest-attachment/build.gradle | 2 +- plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1 | 1 - plugins/ingest-attachment/licenses/poi-5.4.1.jar.sha1 | 1 + plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1 | 1 - plugins/ingest-attachment/licenses/poi-ooxml-5.4.1.jar.sha1 | 1 + .../ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1 | 1 - .../ingest-attachment/licenses/poi-ooxml-lite-5.4.1.jar.sha1 | 1 + .../ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1 | 1 - .../ingest-attachment/licenses/poi-scratchpad-5.4.1.jar.sha1 | 1 + 10 files changed, 6 insertions(+), 5 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-5.4.1.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-5.4.1.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-lite-5.4.1.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-scratchpad-5.4.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bc7b66d47eb2..967dd745ddd9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `reactor_netty` from 1.1.26 to 1.2.3 ([#17322](https://github.com/opensearch-project/OpenSearch/pull/17322), [#17377](https://github.com/opensearch-project/OpenSearch/pull/17377)) - Bump `com.google.api.grpc:proto-google-iam-v1` from 1.33.0 to 1.49.1 ([#17811](https://github.com/opensearch-project/OpenSearch/pull/17811)) - Bump `com.azure:azure-core` from 1.54.1 to 1.55.3 ([#17810](https://github.com/opensearch-project/OpenSearch/pull/17810)) +- Bump `org.apache.poi` version from 5.2.5 to 5.4.1 in /plugins/ingest-attachment 
([#17887](https://github.com/opensearch-project/OpenSearch/pull/17887)) ### Changed diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 47a15b75234dc..57f56b138d2a7 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -40,7 +40,7 @@ opensearchplugin { versions << [ 'tika' : '2.9.2', 'pdfbox': '2.0.31', - 'poi' : '5.2.5', + 'poi' : '5.4.1', 'mime4j': '0.8.11' ] diff --git a/plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1 deleted file mode 100644 index 0eca17726eb0b..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e00f6b2f76375fe89022d5a7db8acb71cbd55f5 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-5.4.1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.4.1.jar.sha1 new file mode 100644 index 0000000000000..5603199c7c9ec --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-5.4.1.jar.sha1 @@ -0,0 +1 @@ +e4c74c59e13f62d8edd215756d14ce55566c6efe \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1 deleted file mode 100644 index 6b14be4461425..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df9f2c52371eeba24db8ea8cafa77285c3cc0742 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.4.1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.4.1.jar.sha1 new file mode 100644 index 0000000000000..7d9eacfed6517 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-5.4.1.jar.sha1 @@ -0,0 +1 @@ +508ed3e7fcc775738415870d0bc6d27196317fe3 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1 deleted file mode 100644 index f9a473173a297..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eaa61452d8f0d13080fbb4757a392f09f90e4c49 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.4.1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.4.1.jar.sha1 new file mode 100644 index 0000000000000..b38ce888bffea --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.4.1.jar.sha1 @@ -0,0 +1 @@ +0ed2246f88254ba40fc2e7999c8f8e4e9031208a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1 deleted file mode 100644 index 68665ddafd7d8..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fc600cf765a49d73935a6e48a5b84f4abcdd0518 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.4.1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.4.1.jar.sha1 new file mode 100644 index 0000000000000..8f328d4cc4a17 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-scratchpad-5.4.1.jar.sha1 @@ -0,0 +1 @@ +ba43fb23ab262865b349d58d6ef1218755eef228 \ No newline at end of file From 032f4095cf797059e718d74fd2d337d95f8a09a9 Mon Sep 17 00:00:00 2001 From: Mikhail Khludnev Date: Fri, 11 Apr 2025 04:00:14 +0300 Subject: [PATCH 196/550] Pass in order terms as sorted to TermInSetQuery() (#17714) * 
pass in order terms as sorted to TermInSetQuery() Signed-off-by: Mikhail Khludnev * slightly more elegant solution Signed-off-by: Mikhail Khludnev * Attempting mocking TermInSetQ constructor. Signed-off-by: Mikhail Khludnev * Handle ids as well. Signed-off-by: Mikhail Khludnev * forbidden api Signed-off-by: Mikhail Khludnev * make unnecessary method slow but correct. Signed-off-by: Mikhail Khludnev * make unnecessary method slow but correct. Signed-off-by: Mikhail Khludnev * Polish test coverage Signed-off-by: Mikhail Khludnev * CHANGELOG.md Signed-off-by: Mikhail Khludnev * assertThrows Signed-off-by: Mikhail Khludnev * spotlessApply Signed-off-by: Mikhail Khludnev * coverage tests and refactoring Signed-off-by: Mikhail Khludnev * javadoc Signed-off-by: Mikhail Khludnev * javadoc Signed-off-by: Mikhail Khludnev * mark nocommit Signed-off-by: Mikhail Khludnev * one more nocommit test Signed-off-by: Mikhail Khludnev * forbidden api Signed-off-by: Mikhail Khludnev * no commit for out of line tests Signed-off-by: Mikhail Khludnev * Review Signed-off-by: Mikhail Khludnev --------- Signed-off-by: Mikhail Khludnev Signed-off-by: Mikhail Khludnev --- CHANGELOG.md | 1 + .../mapper/BytesRefsCollectionBuilder.java | 182 ++++++++++++++++++ .../index/mapper/IdFieldMapper.java | 8 +- .../index/mapper/KeywordFieldMapper.java | 23 +-- .../index/mapper/TermBasedFieldType.java | 9 +- .../BytesRefsCollectionBuilderTests.java | 82 ++++++++ .../index/mapper/KeywordFieldTypeTests.java | 83 ++++++++ 7 files changed, 367 insertions(+), 21 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/BytesRefsCollectionBuilder.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/BytesRefsCollectionBuilderTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 967dd745ddd9d..d86de830248e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add FilterFieldType for developers who want to wrap MappedFieldType ([#17627](https://github.com/opensearch-project/OpenSearch/pull/17627)) - [Rule Based Auto-tagging] Add in-memory rule processing service ([#17365](https://github.com/opensearch-project/OpenSearch/pull/17365)) - [Security Manager Replacement] Create initial Java Agent to intercept Socket::connect calls ([#17724](https://github.com/opensearch-project/OpenSearch/pull/17724)) +- Faster `terms_query` with already sorted terms ([#17714](https://github.com/opensearch-project/OpenSearch/pull/17714)) - Add ingestion management APIs for pause, resume and get ingestion state ([#17631](https://github.com/opensearch-project/OpenSearch/pull/17631)) - [Security Manager Replacement] Enhance Java Agent to intercept System::exit ([#17746](https://github.com/opensearch-project/OpenSearch/pull/17746)) - [Security Manager Replacement] Add a policy parser for Java agent security policies ([#17753](https://github.com/opensearch-project/OpenSearch/pull/17753)) diff --git a/server/src/main/java/org/opensearch/index/mapper/BytesRefsCollectionBuilder.java b/server/src/main/java/org/opensearch/index/mapper/BytesRefsCollectionBuilder.java new file mode 100644 index 0000000000000..2fbb01b2ced25 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/BytesRefsCollectionBuilder.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source 
license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.util.BytesRef; + +import java.util.AbstractSet; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.SortedSet; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * Purposed for passing terms into {@link TermInSetQuery}. + * If the given terms are sorted already, it wrap it with a SortedSet stub. + * Otherwise, it passes terms as list. + */ +public class BytesRefsCollectionBuilder implements Consumer, Supplier> { + + /** + * Strategy for building BytesRef collection. + * */ + protected interface ConsumerStrategy extends Function, Supplier> {} + + public BytesRefsCollectionBuilder(int sizeExpected) { + terms = new ArrayList<>(sizeExpected); + } + + protected final List terms; + protected ConsumerStrategy delegate = createStartStrategy(); + + @Override + public void accept(BytesRef bytesRef) { + delegate = delegate.apply(bytesRef); + } + + @Override + public Collection get() { + Collection result = delegate.get(); + delegate = createFrozenStrategy(result); + return result; + } + + protected ConsumerStrategy createStartStrategy() { + return new ConsumerStrategy() { + @Override + public ConsumerStrategy apply(BytesRef firstBytes) { + terms.add(firstBytes); // firstly, just store + return createSortedStrategy(firstBytes); + } + + @Override + public Collection get() { + return terms; // empty list + } + }; + } + + protected ConsumerStrategy createSortedStrategy(BytesRef firstBytes) { + return new ConsumerStrategy() { + BytesRef prev = firstBytes; + + @Override + public ConsumerStrategy apply(BytesRef bytesRef) { + terms.add(bytesRef); + if (bytesRef.compareTo(prev) >= 0) { // keep checking sorted + prev = bytesRef; + return this; + } else { // isn't sorted + return createUnsortedStrategy(); + } + } + + @Override + public Collection get() { + return new SortedBytesSet(terms); + } + }; + } + + protected ConsumerStrategy createUnsortedStrategy() { + return new ConsumerStrategy() { + @Override + public ConsumerStrategy apply(BytesRef bytesRef) { // just storing + terms.add(bytesRef); + return this; + } + + @Override + public Collection get() { + return terms; + } + }; + } + + protected ConsumerStrategy createFrozenStrategy(Collection result) { + return new ConsumerStrategy() { + + @Override + public ConsumerStrategy apply(BytesRef bytesRef) { + throw new IllegalStateException("already build"); + } + + @Override + public Collection get() { + return result; + } + }; + } + + /** + * {@link SortedSet} for passing into TermInSetQuery() + * */ + protected static class SortedBytesSet extends AbstractSet implements SortedSet { + + private final List bytesRefs; + + public SortedBytesSet(List bytesRefs) { + this.bytesRefs = bytesRefs; + } + + @Override + public Iterator iterator() { + return bytesRefs.iterator(); + } + + @Override + public int size() { + return bytesRefs.size(); + } + + @Override + public Comparator comparator() { + return null; + } + + @Override + public SortedSet subSet(BytesRef fromElement, BytesRef toElement) { + throw new UnsupportedOperationException(); + } + + @Override + public SortedSet headSet(BytesRef toElement) { + throw new UnsupportedOperationException(); + } + + @Override + public SortedSet tailSet(BytesRef fromElement) { + throw new UnsupportedOperationException(); + } + + 
@Override + public BytesRef first() { + throw new UnsupportedOperationException(); + } + + @Override + public BytesRef last() { + throw new UnsupportedOperationException(); + } + + /** + * Dedicated for {@link TermInSetQuery#TermInSetQuery(String, Collection)}. + */ + @Override + public T[] toArray(T[] a) { + return bytesRefs.toArray(a); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java index a386e8b11eb38..786774fb95e07 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java @@ -66,9 +66,7 @@ import org.opensearch.search.sort.SortOrder; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.function.Supplier; @@ -166,15 +164,15 @@ public Query existsQuery(QueryShardContext context) { @Override public Query termsQuery(List values, QueryShardContext context) { failIfNotIndexed(); - Collection bytesRefs = new ArrayList<>(values.size()); + BytesRefsCollectionBuilder bytesRefs = new BytesRefsCollectionBuilder(values.size()); for (int i = 0; i < values.size(); i++) { Object idObject = values.get(i); if (idObject instanceof BytesRef) { idObject = ((BytesRef) idObject).utf8ToString(); } - bytesRefs.add(Uid.encodeId(idObject.toString())); + bytesRefs.accept(Uid.encodeId(idObject.toString())); } - return new TermInSetQuery(MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, name(), bytesRefs); + return new TermInSetQuery(MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, name(), bytesRefs.get()); } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 02870347163f3..b4d205f38a7b9 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -70,9 +70,7 @@ import java.io.IOException; import java.io.UncheckedIOException; -import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; @@ -449,23 +447,26 @@ public Query termsQuery(List values, QueryShardContext context) { if (!context.keywordFieldIndexOrDocValuesEnabled()) { return super.termsQuery(values, context); } - Collection iBytesRefs = new ArrayList<>(values.size()); - Collection dVByteRefs = new ArrayList<>(values.size()); + BytesRefsCollectionBuilder iBytesRefs = new BytesRefsCollectionBuilder(values.size()); + BytesRefsCollectionBuilder dVByteRefs = new BytesRefsCollectionBuilder(values.size()); for (int i = 0; i < values.size(); i++) { - iBytesRefs.add(indexedValueForSearch(values.get(i))); - dVByteRefs.add(indexedValueForSearch(rewriteForDocValue(values.get(i)))); + BytesRef idxBytes = indexedValueForSearch(values.get(i)); + iBytesRefs.accept(idxBytes); + BytesRef dvBytes = indexedValueForSearch(rewriteForDocValue(values.get(i))); + dVByteRefs.accept(dvBytes); } - Query indexQuery = new TermInSetQuery(MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, name(), iBytesRefs); - Query dvQuery = new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), dVByteRefs); + Query indexQuery = new TermInSetQuery(MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, name(), iBytesRefs.get()); + Query dvQuery = new 
TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), dVByteRefs.get()); return new IndexOrDocValuesQuery(indexQuery, dvQuery); } // if we only have doc_values enabled, we construct a new query with doc_values re-written if (hasDocValues()) { - Collection bytesRefs = new ArrayList<>(values.size()); + BytesRefsCollectionBuilder bytesCollector = new BytesRefsCollectionBuilder(values.size()); for (int i = 0; i < values.size(); i++) { - bytesRefs.add(indexedValueForSearch(rewriteForDocValue(values.get(i)))); + BytesRef dvBytes = indexedValueForSearch(rewriteForDocValue(values.get(i))); + bytesCollector.accept(dvBytes); } - return new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), bytesRefs); + return new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), bytesCollector.get()); } // has index enabled, we're going to return the query as is return super.termsQuery(values, context); diff --git a/server/src/main/java/org/opensearch/index/mapper/TermBasedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/TermBasedFieldType.java index da01c6490ea37..938f2ccbe770b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TermBasedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/TermBasedFieldType.java @@ -43,8 +43,6 @@ import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.index.query.QueryShardContext; -import java.util.ArrayList; -import java.util.Collection; import java.util.List; import java.util.Map; @@ -96,11 +94,12 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query termsQuery(List values, QueryShardContext context) { failIfNotIndexed(); - Collection bytesRefs = new ArrayList<>(values.size()); + BytesRefsCollectionBuilder bytesCollector = new BytesRefsCollectionBuilder(values.size()); for (int i = 0; i < values.size(); i++) { - bytesRefs.add(indexedValueForSearch(values.get(i))); + BytesRef elem = indexedValueForSearch(values.get(i)); + bytesCollector.accept(elem); } - return new TermInSetQuery(MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, name(), bytesRefs); + return new TermInSetQuery(MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, name(), bytesCollector.get()); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/BytesRefsCollectionBuilderTests.java b/server/src/test/java/org/opensearch/index/mapper/BytesRefsCollectionBuilderTests.java new file mode 100644 index 0000000000000..83c4877a1949e --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/BytesRefsCollectionBuilderTests.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.SortedSet; +import java.util.stream.Stream; + +public class BytesRefsCollectionBuilderTests extends OpenSearchTestCase { + + public void testBuildSortedNotSorted() { + String[] seedStrings = generateRandomStringArray(10, 10, false, true); + List bytesRefList = Arrays.stream(seedStrings).map(BytesRef::new).toList(); + List sortedBytesRefs = bytesRefList.stream().sorted().toList(); + + Collection sortedSet = assertCollectionBuilt(sortedBytesRefs); + assertCollectionBuilt(bytesRefList); + + assertTrue(sortedSet instanceof SortedSet); + assertNull(((SortedSet) sortedSet).comparator()); + } + + public void testBuildFooBar() { + String[] reverseOrderStrings = new String[] { "foo", "bar" }; + List bytesRefList = Arrays.stream(reverseOrderStrings).map(BytesRef::new).toList(); + List sortedBytesRefs = bytesRefList.stream().sorted().toList(); + + Collection sortedSet = assertCollectionBuilt(sortedBytesRefs); + Collection reverseList = assertCollectionBuilt(bytesRefList); + + assertTrue(sortedSet instanceof SortedSet); + assertNull(((SortedSet) sortedSet).comparator()); + + assertTrue(reverseList instanceof List); + } + + public void testFrozen() { + BytesRefsCollectionBuilder builder = new BytesRefsCollectionBuilder(1); + String[] seedStrings = generateRandomStringArray(5, 10, false, true); + Arrays.stream(seedStrings).map(BytesRef::new).forEachOrdered(builder); + Collection bytesRefCollection = builder.get(); + assertNotNull(bytesRefCollection); + assertEquals(seedStrings.length, bytesRefCollection.size()); + assertThrows(IllegalStateException.class, () -> builder.accept(new BytesRef("illegal state"))); + assertSame(bytesRefCollection, builder.get()); + } + + private static Collection assertCollectionBuilt(List sortedBytesRefs) { + BytesRefsCollectionBuilder builder = new BytesRefsCollectionBuilder(1); + sortedBytesRefs.stream().forEachOrdered(builder); + Collection bytesRefCollection = builder.get(); + assertEquals(bytesRefCollection.size(), sortedBytesRefs.size()); + for (Iterator iterator = bytesRefCollection.iterator(), iterator2 = sortedBytesRefs.iterator(); iterator.hasNext() + || iterator2.hasNext();) { + assertTrue(iterator.next().bytesEquals(iterator2.next())); + } + return bytesRefCollection; + } + + public void testCoverUnsupported() { + BytesRefsCollectionBuilder builder = new BytesRefsCollectionBuilder(1); + Stream.of("in", "order").map(BytesRef::new).forEachOrdered(builder); + SortedSet bytesRefCollection = (SortedSet) builder.get(); + assertThrows(UnsupportedOperationException.class, () -> bytesRefCollection.subSet(new BytesRef("a"), new BytesRef("z"))); + assertThrows(UnsupportedOperationException.class, () -> bytesRefCollection.headSet(new BytesRef("a"))); + assertThrows(UnsupportedOperationException.class, () -> bytesRefCollection.tailSet(new BytesRef("a"))); + assertThrows(UnsupportedOperationException.class, bytesRefCollection::first); + assertThrows(UnsupportedOperationException.class, bytesRefCollection::last); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java index 60137918e0cad..4074cb0497e6c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java +++ 
b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java @@ -33,6 +33,8 @@ import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.TokenFilter; @@ -77,9 +79,18 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Random; +import java.util.SortedSet; +import java.util.stream.Stream; + +import org.mockito.MockedConstruction; +import org.mockito.stubbing.Answer; + +import static org.mockito.Mockito.mockConstructionWithAnswer; public class KeywordFieldTypeTests extends FieldTypeTestCase { @@ -197,6 +208,78 @@ public void testTermsQuery() { ); } + public void testTermsSortedQuery() { + String[] seedStrings = generateRandomStringArray(10, 10, false, true); + List bytesRefList = Arrays.stream(seedStrings).map(BytesRef::new).toList(); + List sortedStrings = bytesRefList.stream().sorted().map(BytesRef::utf8ToString).toList(); + + MappedFieldType ft = new KeywordFieldType("field"); + Query expected = new IndexOrDocValuesQuery( + new TermInSetQuery("field", bytesRefList), + new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, "field", bytesRefList) + ); + assertEquals(expected, ft.termsQuery(sortedStrings, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + Query expectedIndex = new TermInSetQuery("field", bytesRefList); + assertEquals(expectedIndex, onlyIndexed.termsQuery(sortedStrings, null)); + + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query expectedDocValues = new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, "field", bytesRefList); + assertEquals(expectedDocValues, onlyDocValues.termsQuery(sortedStrings, null)); + } + + @AwaitsFix(bugUrl = "no commit") + public void testMockTermsSortedQuery() { + String[] seedStrings = generateRandomStringArray(10, 10, false, false); + if (seedStrings.length == 1) { + seedStrings = Stream.concat(Arrays.stream(seedStrings), Arrays.stream(generateRandomStringArray(10, 10, false, false))) + .toArray(String[]::new); + } + List bytesRefList = Arrays.stream(seedStrings).map(BytesRef::new).toList(); + List sortedStrings = bytesRefList.stream().sorted().map(BytesRef::utf8ToString).toList(); + Answer asseretSortedSetArg = invocationOnMock -> { + Object[] args = invocationOnMock.getArguments(); + for (int i = 0; i < args.length; i++) { + if (args[i] instanceof Collection) { + assertTrue(args[i] instanceof SortedSet); + return invocationOnMock.callRealMethod(); + } + } + fail(); + return null; + }; + try (MockedConstruction ignored = mockConstructionWithAnswer(TermInSetQuery.class, asseretSortedSetArg)) { + MappedFieldType ft = new KeywordFieldType("field"); + assertNotNull(ft.termsQuery(sortedStrings, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertNotNull(onlyIndexed.termsQuery(sortedStrings, null)); + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertNotNull(onlyDocValues.termsQuery(sortedStrings, null)); + } + } + + @AwaitsFix(bugUrl = "no commit") + public void 
testHeavyWeight() {
+ int arraySize = 10000000;
+ BytesRef[] array = new BytesRef[arraySize];
+ Random random = random();
+ for (int i = 0; i < arraySize; i++) {
+ String str = RandomStrings.randomAsciiOfLength(random, 10);
+ array[i] = new BytesRef(str);
+ }
+ BytesRefsCollectionBuilder outOfOrder = new BytesRefsCollectionBuilder(arraySize);
+ BytesRefsCollectionBuilder inOrder = new BytesRefsCollectionBuilder(arraySize);
+ Arrays.stream(array).forEach(outOfOrder);
+ Arrays.stream(array).sorted().forEachOrdered(inOrder);
+ Logger logger = LogManager.getLogger(KeywordFieldTypeTests.class);
+ long start = System.currentTimeMillis(), intermid;
+ new TermInSetQuery("foo", outOfOrder.get());
+ logger.info("out of order {} ms", (intermid = System.currentTimeMillis()) - start);
+ new TermInSetQuery("foo", inOrder.get());
+ logger.info("in order {} ms", System.currentTimeMillis() - intermid);
+ }
+
 public void testExistsQuery() {
 {
 KeywordFieldType ft = new KeywordFieldType("field");

From 9bbdd3cc2a7d620efc6981b5cf3fad6cdfd1acf0 Mon Sep 17 00:00:00 2001
From: Varun Bharadwaj
Date: Thu, 10 Apr 2025 20:02:54 -0700
Subject: [PATCH 197/550] Use shard pointer tracked by writer for recovery
 (#17868)

Signed-off-by: Varun Bharadwaj
---
 CHANGELOG.md | 1 +
 .../pollingingest/DefaultStreamPoller.java | 91 +++++++++++--------
 .../MessageProcessorRunnable.java | 11 +++
 .../index/engine/IngestionEngineTests.java | 2 +-
 .../DefaultStreamPollerTests.java | 74 ++++++++++++---
 5 files changed, 130 insertions(+), 49 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d86de830248e7..e6f9e29700b6e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -77,6 +77,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Fixed
 - Fix bytes parameter on `_cat/recovery` ([#17598](https://github.com/opensearch-project/OpenSearch/pull/17598))
 - Fix slow performance of FeatureFlag checks ([#17611](https://github.com/opensearch-project/OpenSearch/pull/17611))
+- Fix shard recovery in pull-based ingestion to avoid skipping messages ([#17868](https://github.com/opensearch-project/OpenSearch/pull/17868))

 ### Security

diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java
index 4b4a44e13d1df..42019c5bfcd55 100644
--- a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java
+++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java
@@ -50,7 +50,7 @@ public class DefaultStreamPoller implements StreamPoller {
 private ExecutorService processorThread;

 // start of the batch, inclusive
- private IngestionShardPointer batchStartPointer;
+ private IngestionShardPointer initialBatchStartPointer;
 private boolean includeBatchStartPointer = false;

 private ResetState resetState;
@@ -105,7 +105,7 @@ public DefaultStreamPoller(
 this.consumer = Objects.requireNonNull(consumer);
 this.resetState = resetState;
 this.resetValue = resetValue;
- this.batchStartPointer = startPointer;
+ this.initialBatchStartPointer = startPointer;
 this.state = initialState;
 this.persistedPointers = persistedPointers;
 if (!this.persistedPointers.isEmpty()) {
@@ -170,23 +170,23 @@ protected void startPoll() {
 if (resetState != ResetState.NONE) {
 switch (resetState) {
 case EARLIEST:
- batchStartPointer = consumer.earliestPointer();
- logger.info("Resetting offset by seeking to earliest offset {}", batchStartPointer.asString());
+ initialBatchStartPointer = 
consumer.earliestPointer(); + logger.info("Resetting offset by seeking to earliest offset {}", initialBatchStartPointer.asString()); break; case LATEST: - batchStartPointer = consumer.latestPointer(); - logger.info("Resetting offset by seeking to latest offset {}", batchStartPointer.asString()); + initialBatchStartPointer = consumer.latestPointer(); + logger.info("Resetting offset by seeking to latest offset {}", initialBatchStartPointer.asString()); break; case REWIND_BY_OFFSET: - batchStartPointer = consumer.pointerFromOffset(resetValue); - logger.info("Resetting offset by seeking to offset {}", batchStartPointer.asString()); + initialBatchStartPointer = consumer.pointerFromOffset(resetValue); + logger.info("Resetting offset by seeking to offset {}", initialBatchStartPointer.asString()); break; case REWIND_BY_TIMESTAMP: - batchStartPointer = consumer.pointerFromTimestampMillis(Long.parseLong(resetValue)); + initialBatchStartPointer = consumer.pointerFromTimestampMillis(Long.parseLong(resetValue)); logger.info( "Resetting offset by seeking to timestamp {}, corresponding offset {}", resetValue, - batchStartPointer.asString() + initialBatchStartPointer.asString() ); break; } @@ -209,7 +209,8 @@ protected void startPoll() { List> results; if (includeBatchStartPointer) { - results = consumer.readNext(batchStartPointer, true, MAX_POLL_SIZE, POLL_TIMEOUT); + results = consumer.readNext(initialBatchStartPointer, true, MAX_POLL_SIZE, POLL_TIMEOUT); + includeBatchStartPointer = false; } else { results = consumer.readNext(MAX_POLL_SIZE, POLL_TIMEOUT); } @@ -220,38 +221,47 @@ protected void startPoll() { } state = State.PROCESSING; - // process the records - boolean firstInBatch = true; - for (IngestionShardConsumer.ReadResult result : results) { - if (firstInBatch) { - // update the batch start pointer to the next batch - batchStartPointer = result.getPointer(); - firstInBatch = false; - } + processRecords(results); + } catch (Exception e) { + // Pause ingestion when an error is encountered while polling the streaming source. + // Currently we do not have a good way to skip past the failing messages. + // The user will have the option to manually update the offset and resume ingestion. + // todo: support retry? + logger.error("Pausing ingestion. Fatal error occurred in polling the shard {}: {}", consumer.getShardId(), e); + pause(); + } + } + } - // check if the message is already processed - if (isProcessed(result.getPointer())) { - logger.info("Skipping message with pointer {} as it is already processed", result.getPointer().asString()); - continue; - } - totalPolledCount.inc(); - blockingQueue.put(result); - - logger.debug( - "Put message {} with pointer {} to the blocking queue", - String.valueOf(result.getMessage().getPayload()), - result.getPointer().asString() - ); + private void processRecords(List> results) { + for (IngestionShardConsumer.ReadResult result : results) { + try { + // check if the message is already processed + if (isProcessed(result.getPointer())) { + logger.debug("Skipping message with pointer {} as it is already processed", () -> result.getPointer().asString()); + continue; } - // for future reads, we do not need to include the batch start pointer, and read from the last successful pointer. 
- includeBatchStartPointer = false; - } catch (Throwable e) { - logger.error("Error in polling the shard {}: {}", consumer.getShardId(), e); + totalPolledCount.inc(); + blockingQueue.put(result); + + logger.debug( + "Put message {} with pointer {} to the blocking queue", + String.valueOf(result.getMessage().getPayload()), + result.getPointer().asString() + ); + } catch (Exception e) { + logger.error( + "Error in processing a record. Shard {}, pointer {}: {}", + consumer.getShardId(), + result.getPointer().asString(), + e + ); errorStrategy.handleError(e, IngestionErrorStrategy.ErrorStage.POLLING); if (!errorStrategy.shouldIgnoreError(e, IngestionErrorStrategy.ErrorStage.POLLING)) { // Blocking error encountered. Pause poller to stop processing remaining updates. pause(); + break; } } } @@ -329,9 +339,16 @@ public boolean isClosed() { return closed; } + /** + * Returns the batch start pointer from where the poller can resume in case of shard recovery. The poller and + * processor are decoupled in this implementation, and hence the latest pointer tracked by the processor acts as the + * recovery/start point. In case the processor has not started tracking, then the initial batchStartPointer used by + * the poller acts as the start point. + */ @Override public IngestionShardPointer getBatchStartPointer() { - return batchStartPointer; + IngestionShardPointer currentShardPointer = processorRunnable.getCurrentShardPointer(); + return currentShardPointer == null ? initialBatchStartPointer : currentShardPointer; } @Override diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java index 28de7224f9d89..c1d098279a7eb 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java @@ -13,6 +13,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.Term; import org.opensearch.action.DocWriteRequest; +import org.opensearch.common.Nullable; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.util.RequestUtils; @@ -59,6 +60,10 @@ public class MessageProcessorRunnable implements Runnable { private final MessageProcessor messageProcessor; private final CounterMetric stats = new CounterMetric(); + // tracks the most recent pointer that is being processed + @Nullable + private volatile IngestionShardPointer currentShardPointer; + /** * Constructor. 
* @@ -274,6 +279,7 @@ public void run() { if (readResult != null) { try { stats.inc(); + currentShardPointer = readResult.getPointer(); messageProcessor.process(readResult.getMessage(), readResult.getPointer()); readResult = null; } catch (Exception e) { @@ -308,4 +314,9 @@ public IngestionErrorStrategy getErrorStrategy() { public void setErrorStrategy(IngestionErrorStrategy errorStrategy) { this.errorStrategy = errorStrategy; } + + @Nullable + public IngestionShardPointer getCurrentShardPointer() { + return currentShardPointer; + } } diff --git a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java index d8c5ebb16a36a..a510f92f9dd4c 100644 --- a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java @@ -102,7 +102,7 @@ public void testCreateEngine() throws IOException { // verify the commit data Assert.assertEquals(7, commitData.size()); // the commiit data is the start of the current batch - Assert.assertEquals("0", commitData.get(StreamPoller.BATCH_START)); + Assert.assertEquals("1", commitData.get(StreamPoller.BATCH_START)); // verify the stored offsets var offset = new FakeIngestionSource.FakeIngestionShardPointer(0); diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java index 6d71a3763fbc9..37ff7eeb27f4c 100644 --- a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java +++ b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java @@ -267,13 +267,14 @@ public void testDropErrorIngestionStrategy() throws TimeoutException, Interrupte ); IngestionShardConsumer mockConsumer = mock(IngestionShardConsumer.class); when(mockConsumer.getShardId()).thenReturn(0); - when(mockConsumer.readNext(any(), anyBoolean(), anyLong(), anyInt())).thenThrow(new RuntimeException("message1 poll failed")) - .thenReturn(readResultsBatch1) - .thenThrow(new RuntimeException("message3 poll failed")) - .thenReturn(readResultsBatch2) - .thenReturn(Collections.emptyList()); + when(mockConsumer.readNext(any(), anyBoolean(), anyLong(), anyInt())).thenReturn(readResultsBatch1); + when(mockConsumer.readNext(anyLong(), anyInt())).thenReturn(readResultsBatch2).thenReturn(Collections.emptyList()); IngestionErrorStrategy errorStrategy = spy(new DropIngestionErrorStrategy("ingestion_source")); + ArrayBlockingQueue mockQueue = mock(ArrayBlockingQueue.class); + doThrow(new RuntimeException()).doNothing().when(mockQueue).put(any()); + processorRunnable = new MessageProcessorRunnable(mockQueue, processor, errorStrategy); + poller = new DefaultStreamPoller( new FakeIngestionSource.FakeIngestionShardPointer(0), persistedPointers, @@ -288,7 +289,7 @@ public void testDropErrorIngestionStrategy() throws TimeoutException, Interrupte Thread.sleep(sleepTime); verify(errorStrategy, times(1)).handleError(any(), eq(IngestionErrorStrategy.ErrorStage.POLLING)); - verify(processor, times(2)).process(any(), any()); + verify(mockQueue, times(4)).put(any()); } public void testBlockErrorIngestionStrategy() throws TimeoutException, InterruptedException { @@ -314,12 +315,14 @@ public void testBlockErrorIngestionStrategy() throws TimeoutException, Interrupt ); IngestionShardConsumer mockConsumer = mock(IngestionShardConsumer.class); 
when(mockConsumer.getShardId()).thenReturn(0); - when(mockConsumer.readNext(any(), anyBoolean(), anyLong(), anyInt())).thenThrow(new RuntimeException("message1 poll failed")) - .thenReturn(readResultsBatch1) - .thenReturn(readResultsBatch2) - .thenReturn(Collections.emptyList()); + when(mockConsumer.readNext(any(), anyBoolean(), anyLong(), anyInt())).thenReturn(readResultsBatch1); + when(mockConsumer.readNext(anyLong(), anyInt())).thenReturn(readResultsBatch2).thenReturn(Collections.emptyList()); IngestionErrorStrategy errorStrategy = spy(new BlockIngestionErrorStrategy("ingestion_source")); + ArrayBlockingQueue mockQueue = mock(ArrayBlockingQueue.class); + doThrow(new RuntimeException()).doNothing().when(mockQueue).put(any()); + processorRunnable = new MessageProcessorRunnable(mockQueue, processor, errorStrategy); + poller = new DefaultStreamPoller( new FakeIngestionSource.FakeIngestionShardPointer(0), persistedPointers, @@ -334,7 +337,6 @@ public void testBlockErrorIngestionStrategy() throws TimeoutException, Interrupt Thread.sleep(sleepTime); verify(errorStrategy, times(1)).handleError(any(), eq(IngestionErrorStrategy.ErrorStage.POLLING)); - verify(processor, never()).process(any(), any()); assertEquals(DefaultStreamPoller.State.PAUSED, poller.getState()); assertTrue(poller.isPaused()); } @@ -374,4 +376,54 @@ public void testUpdateErrorStrategy() { assertTrue(poller.getErrorStrategy() instanceof BlockIngestionErrorStrategy); assertTrue(processorRunnable.getErrorStrategy() instanceof BlockIngestionErrorStrategy); } + + public void testPersistedBatchStartPointer() throws TimeoutException, InterruptedException { + messages.add("{\"_id\":\"3\",\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8)); + messages.add("{\"_id\":\"4\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); + List< + IngestionShardConsumer.ReadResult< + FakeIngestionSource.FakeIngestionShardPointer, + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch1 = fakeConsumer.readNext( + fakeConsumer.earliestPointer(), + true, + 2, + 100 + ); + List< + IngestionShardConsumer.ReadResult< + FakeIngestionSource.FakeIngestionShardPointer, + FakeIngestionSource.FakeIngestionMessage>> readResultsBatch2 = fakeConsumer.readNext( + new FakeIngestionSource.FakeIngestionShardPointer(2), + true, + 2, + 100 + ); + + // This test publishes 4 messages, so use blocking queue of size 3. This ensures the poller is blocked when adding the 4th message + // for validation. 
+ IngestionErrorStrategy errorStrategy = spy(new BlockIngestionErrorStrategy("ingestion_source")); + doThrow(new RuntimeException()).when(processor).process(any(), any()); + processorRunnable = new MessageProcessorRunnable(new ArrayBlockingQueue<>(3), processor, errorStrategy); + + IngestionShardConsumer mockConsumer = mock(IngestionShardConsumer.class); + when(mockConsumer.getShardId()).thenReturn(0); + when(mockConsumer.readNext(any(), anyBoolean(), anyLong(), anyInt())).thenReturn(readResultsBatch1); + + when(mockConsumer.readNext(anyLong(), anyInt())).thenReturn(readResultsBatch2).thenReturn(Collections.emptyList()); + + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(0), + persistedPointers, + mockConsumer, + processorRunnable, + StreamPoller.ResetState.NONE, + "", + errorStrategy, + StreamPoller.State.NONE + ); + poller.start(); + Thread.sleep(sleepTime); + + assertEquals(new FakeIngestionSource.FakeIngestionShardPointer(0), poller.getBatchStartPointer()); + } } From 3e10fe3f4eacbc950f09e7f2a285797b78df8ca1 Mon Sep 17 00:00:00 2001 From: Shailesh Singh Date: Fri, 11 Apr 2025 11:34:16 +0530 Subject: [PATCH 198/550] Add query changes to support unsigned-long in star tree (#17275) Signed-off-by: Shailesh Singh --- CHANGELOG.md | 1 + .../datacube/DimensionDataType.java | 8 +- .../node/FixedLengthStarTreeNode.java | 58 ++++++---- .../datacube/startree/node/StarTreeNode.java | 13 ++- .../startree/filter/ExactMatchDimFilter.java | 7 +- .../startree/filter/RangeMatchDimFilter.java | 10 +- .../provider/DimensionFilterMapper.java | 31 +++++- .../FixedLengthStarTreeNodeSearchTests.java | 52 ++++++--- .../startree/BigIntegerField.java | 100 ++++++++++++++++++ .../startree/MetricAggregatorTests.java | 30 +++++- 10 files changed, 263 insertions(+), 47 deletions(-) create mode 100644 server/src/test/java/org/opensearch/search/aggregations/startree/BigIntegerField.java diff --git a/CHANGELOG.md b/CHANGELOG.md index e6f9e29700b6e..402a030430edb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add SearchService and Search GRPC endpoint ([#17830](https://github.com/opensearch-project/OpenSearch/pull/17830)) - Add update and delete support in pull-based ingestion ([#17822](https://github.com/opensearch-project/OpenSearch/pull/17822)) - Allow maxPollSize and pollTimeout in IngestionSource to be configurable ([#17863](https://github.com/opensearch-project/OpenSearch/pull/17863)) +- [Star Tree] [Search] Add query changes to support unsigned-long in star tree ([#17275](https://github.com/opensearch-project/OpenSearch/pull/17275)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionDataType.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionDataType.java index 67138b69c69fa..bbc1e802f58ff 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionDataType.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionDataType.java @@ -12,6 +12,8 @@ /** * Represents the data type of the dimension value. 
+ * TODO: This needs to be eventually merged with DimensionFilterMapper and all indexing related code + * which use this should instead use the mapper * * @opensearch.experimental */ @@ -19,7 +21,7 @@ public enum DimensionDataType { LONG { @Override - int compare(Long a, Long b) { + public int compare(Long a, Long b) { if (a == null && b == null) { return 0; } @@ -34,7 +36,7 @@ int compare(Long a, Long b) { }, UNSIGNED_LONG { @Override - int compare(Long a, Long b) { + public int compare(Long a, Long b) { if (a == null && b == null) { return 0; } @@ -48,5 +50,5 @@ int compare(Long a, Long b) { } }; - abstract int compare(Long a, Long b); + public abstract int compare(Long a, Long b); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNode.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNode.java index c6c4993290c16..0151f8c7e21c0 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNode.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNode.java @@ -8,12 +8,15 @@ package org.opensearch.index.compositeindex.datacube.startree.fileformats.node; import org.apache.lucene.store.RandomAccessInput; +import org.opensearch.index.compositeindex.datacube.DimensionDataType; import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNodeType; import org.opensearch.search.startree.StarTreeNodeCollector; +import org.opensearch.search.startree.filter.provider.DimensionFilterMapper; import java.io.IOException; import java.io.UncheckedIOException; +import java.util.Comparator; import java.util.Iterator; /** @@ -193,7 +196,11 @@ public StarTreeNode getChildStarNode() throws IOException { } @Override - public StarTreeNode getChildForDimensionValue(Long dimensionValue, StarTreeNode lastMatchedChild) throws IOException { + public StarTreeNode getChildForDimensionValue( + Long dimensionValue, + StarTreeNode lastMatchedChild, + DimensionFilterMapper dimensionFilterMapper + ) throws IOException { // there will be no children for leaf nodes if (isLeaf()) { return null; @@ -201,7 +208,11 @@ public StarTreeNode getChildForDimensionValue(Long dimensionValue, StarTreeNode StarTreeNode resultStarTreeNode = null; if (null != dimensionValue) { - resultStarTreeNode = binarySearchChild(dimensionValue, lastMatchedChild); + resultStarTreeNode = binarySearchChild( + dimensionValue, + lastMatchedChild, + dimensionFilterMapper == null ? DimensionDataType.LONG::compare : dimensionFilterMapper.comparator() + ); } return resultStarTreeNode; } @@ -238,11 +249,13 @@ private static FixedLengthStarTreeNode matchStarTreeNodeTypeOrNull(FixedLengthSt * Performs a binary search to find a child node with the given dimension value. * * @param dimensionValue The dimension value to search for + * @param lastMatchedNode : If not null, we begin the binary search from the node after this. 
+ * @param comparator : Comparator (LONG or UNSIGNED_LONG) to compare the dimension values * @return The child node if found, null otherwise * @throws IOException If there's an error reading from the input */ - private FixedLengthStarTreeNode binarySearchChild(long dimensionValue, StarTreeNode lastMatchedNode) throws IOException { - + private FixedLengthStarTreeNode binarySearchChild(long dimensionValue, StarTreeNode lastMatchedNode, Comparator comparator) + throws IOException { int low = firstChildId; int high = getInt(LAST_CHILD_ID_OFFSET); @@ -268,10 +281,10 @@ private FixedLengthStarTreeNode binarySearchChild(long dimensionValue, StarTreeN int mid = low + (high - low) / 2; FixedLengthStarTreeNode midNode = new FixedLengthStarTreeNode(in, mid); long midDimensionValue = midNode.getDimensionValue(); - - if (midDimensionValue == dimensionValue) { + int compare = comparator.compare(midDimensionValue, dimensionValue); + if (compare == 0) { return midNode; - } else if (midDimensionValue < dimensionValue) { + } else if (compare < 0) { low = mid + 1; } else { high = mid - 1; @@ -281,16 +294,19 @@ private FixedLengthStarTreeNode binarySearchChild(long dimensionValue, StarTreeN } @Override - public void collectChildrenInRange(long low, long high, StarTreeNodeCollector collector) throws IOException { - if (low <= high) { - FixedLengthStarTreeNode lowStarTreeNode = binarySearchChild(low, true, null); + public void collectChildrenInRange(long low, long high, StarTreeNodeCollector collector, DimensionFilterMapper dimensionFilterMapper) + throws IOException { + Comparator comparator = dimensionFilterMapper.comparator(); + if (comparator.compare(low, high) <= 0) { + FixedLengthStarTreeNode lowStarTreeNode = binarySearchChild(low, true, null, comparator); if (lowStarTreeNode != null) { - FixedLengthStarTreeNode highStarTreeNode = binarySearchChild(high, false, lowStarTreeNode); + FixedLengthStarTreeNode highStarTreeNode = binarySearchChild(high, false, lowStarTreeNode, comparator); if (highStarTreeNode != null) { for (int lowNodeId = lowStarTreeNode.nodeId(); lowNodeId <= highStarTreeNode.nodeId(); ++lowNodeId) { collector.collectStarTreeNode(new FixedLengthStarTreeNode(in, lowNodeId)); } - } else if (lowStarTreeNode.getDimensionValue() <= high) { // Low StarTreeNode is the last default node for that dimension. + } else if (comparator.compare(lowStarTreeNode.getDimensionValue(), high) <= 0) { // Low StarTreeNode is the last default// + // node for that dimension. collector.collectStarTreeNode(lowStarTreeNode); } } @@ -302,11 +318,16 @@ public void collectChildrenInRange(long low, long high, StarTreeNodeCollector co * @param dimensionValue : The dimension to match. * @param matchNextHighest : If true then we try to return @dimensionValue or the next Highest. Else, we return @dimensionValue or the next Lowest. * @param lastMatchedNode : If not null, we begin the binary search from the node after this. + * @param comparator : Comparator (LONG or UNSIGNED_LONG) to compare the dimension values * @return : Matched node or null. 
* @throws IOException : */ - private FixedLengthStarTreeNode binarySearchChild(long dimensionValue, boolean matchNextHighest, StarTreeNode lastMatchedNode) - throws IOException { + private FixedLengthStarTreeNode binarySearchChild( + long dimensionValue, + boolean matchNextHighest, + StarTreeNode lastMatchedNode, + Comparator comparator + ) throws IOException { int low = firstChildId; int tempLow = low; @@ -342,17 +363,18 @@ private FixedLengthStarTreeNode binarySearchChild(long dimensionValue, boolean m FixedLengthStarTreeNode midNode = new FixedLengthStarTreeNode(in, mid); long midDimensionValue = midNode.getDimensionValue(); - if (midDimensionValue == dimensionValue) { + int compare = comparator.compare(midDimensionValue, dimensionValue); + if (compare == 0) { return midNode; } else { - if (midDimensionValue < dimensionValue) { // Going to the right from mid to search next + if (compare < 0) { // Going to the right from mid to search next tempLow = mid + 1; // We are going out of bounds for this dimension on the right side. if (tempLow > high || tempLow == nullNodeId) { return matchNextHighest ? null : midNode; } else { FixedLengthStarTreeNode nodeGreaterThanMid = new FixedLengthStarTreeNode(in, tempLow); - if (nodeGreaterThanMid.getDimensionValue() > dimensionValue) { + if (comparator.compare(nodeGreaterThanMid.getDimensionValue(), dimensionValue) > 0) { return matchNextHighest ? nodeGreaterThanMid : midNode; } } @@ -363,7 +385,7 @@ private FixedLengthStarTreeNode binarySearchChild(long dimensionValue, boolean m return matchNextHighest ? midNode : null; } else { FixedLengthStarTreeNode nodeLessThanMid = new FixedLengthStarTreeNode(in, tempHigh); - if (nodeLessThanMid.getDimensionValue() < dimensionValue) { + if (comparator.compare(nodeLessThanMid.getDimensionValue(), dimensionValue) < 0) { return matchNextHighest ? midNode : nodeLessThanMid; } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java index 40161a942ae4b..3dc5bf61883ba 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java @@ -10,6 +10,7 @@ import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.search.startree.StarTreeNodeCollector; +import org.opensearch.search.startree.filter.provider.DimensionFilterMapper; import java.io.IOException; import java.util.Iterator; @@ -107,28 +108,34 @@ public interface StarTreeNode { * @param dimensionValue the dimension value * @return the child node for the given dimension value or null if child is not present * @throws IOException if an I/O error occurs while retrieving the child node + * + * TODO: Remove this method - Only used in UTs for Star Tree indexing */ default StarTreeNode getChildForDimensionValue(Long dimensionValue) throws IOException { - return getChildForDimensionValue(dimensionValue, null); + return getChildForDimensionValue(dimensionValue, null, null); } /** * Matches the given @dimensionValue amongst the child default nodes for this node. 
* @param dimensionValue : Value to match * @param lastMatchedChild : If not null, binary search will use this as the start/low + * @param dimensionFilterMapper : dimensionFilterMapper object * @return : Matched StarTreeNode or null if not found * @throws IOException : Any exception in reading the node data from index. */ - StarTreeNode getChildForDimensionValue(Long dimensionValue, StarTreeNode lastMatchedChild) throws IOException; + StarTreeNode getChildForDimensionValue(Long dimensionValue, StarTreeNode lastMatchedChild, DimensionFilterMapper dimensionFilterMapper) + throws IOException; /** * Collects all matching child nodes whose dimension values lie within the range of low and high, both inclusive. * @param low : Starting of the range ( inclusive ) * @param high : End of the range ( inclusive ) * @param collector : Collector to collect the matched child StarTreeNode's + * @param dimensionFilterMapper : dimensionFilterMapper object * @throws IOException : Any exception in reading the node data from index. */ - void collectChildrenInRange(long low, long high, StarTreeNodeCollector collector) throws IOException; + void collectChildrenInRange(long low, long high, StarTreeNodeCollector collector, DimensionFilterMapper dimensionFilterMapper) + throws IOException; /** * Returns the child star node for a node in the star-tree. diff --git a/server/src/main/java/org/opensearch/search/startree/filter/ExactMatchDimFilter.java b/server/src/main/java/org/opensearch/search/startree/filter/ExactMatchDimFilter.java index 28ea261ca1e56..0ea603f18495f 100644 --- a/server/src/main/java/org/opensearch/search/startree/filter/ExactMatchDimFilter.java +++ b/server/src/main/java/org/opensearch/search/startree/filter/ExactMatchDimFilter.java @@ -35,6 +35,8 @@ public class ExactMatchDimFilter implements DimensionFilter { // Order is essential for successive binary search private TreeSet convertedOrdinals; + private DimensionFilterMapper dimensionFilterMapper; + public ExactMatchDimFilter(String dimensionName, List valuesToMatch) { this.dimensionName = dimensionName; this.rawValues = valuesToMatch; @@ -47,9 +49,10 @@ public void initialiseForSegment(StarTreeValues starTreeValues, SearchContext se dimensionName, starTreeValues.getStarTreeField().getDimensionsOrder() ); - DimensionFilterMapper dimensionFilterMapper = DimensionFilterMapper.Factory.fromMappedFieldType( + this.dimensionFilterMapper = DimensionFilterMapper.Factory.fromMappedFieldType( searchContext.mapperService().fieldType(dimensionName) ); + for (Object rawValue : rawValues) { Optional ordinal = dimensionFilterMapper.getMatchingOrdinal( matchedDim.getField(), @@ -69,7 +72,7 @@ public void matchStarTreeNodes(StarTreeNode parentNode, StarTreeValues starTreeV if (parentNode != null) { StarTreeNode lastMatchedNode = null; for (long ordinal : convertedOrdinals) { - lastMatchedNode = parentNode.getChildForDimensionValue(ordinal, lastMatchedNode); + lastMatchedNode = parentNode.getChildForDimensionValue(ordinal, lastMatchedNode, dimensionFilterMapper); if (lastMatchedNode != null) { collector.collectStarTreeNode(lastMatchedNode); } diff --git a/server/src/main/java/org/opensearch/search/startree/filter/RangeMatchDimFilter.java b/server/src/main/java/org/opensearch/search/startree/filter/RangeMatchDimFilter.java index fecf1a9ebf76b..d41c7815ad7a7 100644 --- a/server/src/main/java/org/opensearch/search/startree/filter/RangeMatchDimFilter.java +++ b/server/src/main/java/org/opensearch/search/startree/filter/RangeMatchDimFilter.java @@ -37,6 +37,8 @@ public 
class RangeMatchDimFilter implements DimensionFilter { private boolean skipRangeCollection = false; + private DimensionFilterMapper dimensionFilterMapper; + public RangeMatchDimFilter(String dimensionName, Object low, Object high, boolean includeLow, boolean includeHigh) { this.dimensionName = dimensionName; this.low = low; @@ -48,9 +50,10 @@ public RangeMatchDimFilter(String dimensionName, Object low, Object high, boolea @Override public void initialiseForSegment(StarTreeValues starTreeValues, SearchContext searchContext) { skipRangeCollection = false; - DimensionFilterMapper dimensionFilterMapper = DimensionFilterMapper.Factory.fromMappedFieldType( + this.dimensionFilterMapper = DimensionFilterMapper.Factory.fromMappedFieldType( searchContext.mapperService().fieldType(dimensionName) ); + lowOrdinal = 0L; if (low != null) { MatchType lowMatchType = includeLow ? MatchType.GTE : MatchType.GT; @@ -77,13 +80,14 @@ public void initialiseForSegment(StarTreeValues starTreeValues, SearchContext se public void matchStarTreeNodes(StarTreeNode parentNode, StarTreeValues starTreeValues, StarTreeNodeCollector collector) throws IOException { if (parentNode != null && !skipRangeCollection) { - parentNode.collectChildrenInRange(lowOrdinal, highOrdinal, collector); + parentNode.collectChildrenInRange(lowOrdinal, highOrdinal, collector, dimensionFilterMapper); } } @Override public boolean matchDimValue(long ordinal, StarTreeValues starTreeValues) { - return lowOrdinal <= ordinal && ordinal <= highOrdinal; + return dimensionFilterMapper.comparator().compare(lowOrdinal, ordinal) <= 0 + && dimensionFilterMapper.comparator().compare(ordinal, highOrdinal) <= 0; } } diff --git a/server/src/main/java/org/opensearch/search/startree/filter/provider/DimensionFilterMapper.java b/server/src/main/java/org/opensearch/search/startree/filter/provider/DimensionFilterMapper.java index 8afdb00864b22..3b1713450e278 100644 --- a/server/src/main/java/org/opensearch/search/startree/filter/provider/DimensionFilterMapper.java +++ b/server/src/main/java/org/opensearch/search/startree/filter/provider/DimensionFilterMapper.java @@ -14,9 +14,11 @@ import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.Numbers; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.compositeindex.datacube.DimensionDataType; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType; @@ -29,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.Optional; @@ -40,6 +43,7 @@ import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.INTEGER; import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.LONG; import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.SHORT; +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.UNSIGNED_LONG; import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.hasDecimalPart; import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.signum; @@ -88,6 +92,10 @@ Optional getMatchingOrdinal( 
DimensionFilter.MatchType matchType ); + default Comparator comparator() { + return DimensionDataType.LONG::compare; + } + /** * Singleton Factory for @{@link DimensionFilterMapper} */ @@ -109,7 +117,9 @@ class Factory { DOUBLE.typeName(), new DoubleFieldMapperNumeric(), org.opensearch.index.mapper.KeywordFieldMapper.CONTENT_TYPE, - new KeywordFieldMapper() + new KeywordFieldMapper(), + UNSIGNED_LONG.typeName(), + new UnsignedLongFieldMapperNumeric() ); public static DimensionFilterMapper fromMappedFieldType(MappedFieldType mappedFieldType) { @@ -208,6 +218,25 @@ Long defaultMaximum() { } } +class UnsignedLongFieldMapperNumeric extends NumericNonDecimalMapper { + + @Override + Long defaultMinimum() { + return Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG; + } + + @Override + Long defaultMaximum() { + return Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG; + } + + @Override + public Comparator comparator() { + return DimensionDataType.UNSIGNED_LONG::compare; + } + +} + abstract class NumericDecimalFieldMapper extends NumericMapper { @Override diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNodeSearchTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNodeSearchTests.java index 4d95034d80bb7..2e0817ddafcd6 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNodeSearchTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNodeSearchTests.java @@ -24,7 +24,9 @@ import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import org.opensearch.index.compositeindex.datacube.startree.node.InMemoryTreeNode; import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeFactory; +import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.aggregations.startree.ArrayBasedCollector; +import org.opensearch.search.startree.filter.provider.DimensionFilterMapper; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -43,6 +45,9 @@ public class FixedLengthStarTreeNodeSearchTests extends OpenSearchTestCase { public void testExactMatch() { long[] randomSorted = random().longs(100, Long.MIN_VALUE, Long.MAX_VALUE).toArray(); Arrays.sort(randomSorted); + DimensionFilterMapper dimensionFilterMapper = DimensionFilterMapper.Factory.fromMappedFieldType( + new NumberFieldMapper.NumberFieldType("fieldName", NumberFieldMapper.NumberType.LONG) + ); for (boolean createStarNode : new boolean[] { true, false }) { for (boolean createNullNode : new boolean[] { true, false }) { createStarTreeForDimension(new long[] { -1, 1, 2, 5 }, createStarNode, createNullNode, List.of(fixedLengthStarTreeNode -> { @@ -53,18 +58,30 @@ public void testExactMatch() { result &= -1 == lastMatchedNode.getDimensionValue(); // Leaf Node should return null result &= null == lastMatchedNode.getChildForDimensionValue(5L); - result &= null == lastMatchedNode.getChildForDimensionValue(5L, lastMatchedNode); + result &= null == lastMatchedNode.getChildForDimensionValue(5L, lastMatchedNode, dimensionFilterMapper); // Asserting Last Matched Node works as expected - lastMatchedNode = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue(1L, lastMatchedNode); + lastMatchedNode = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue( + 1L, + 
lastMatchedNode, + dimensionFilterMapper + ); result &= 1 == lastMatchedNode.getDimensionValue(); - lastMatchedNode = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue(5L, lastMatchedNode); + lastMatchedNode = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue( + 5L, + lastMatchedNode, + dimensionFilterMapper + ); result &= 5 == lastMatchedNode.getDimensionValue(); // Asserting null is returned when last matched node is after the value to search. - lastMatchedNode = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue(2L, lastMatchedNode); + lastMatchedNode = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue( + 2L, + lastMatchedNode, + dimensionFilterMapper + ); result &= null == lastMatchedNode; // When dimension value is null result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(null); - result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(null, null); + result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(null, null, dimensionFilterMapper); // non-existing dimensionValue result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(4L); result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(randomLongBetween(6, Long.MAX_VALUE)); @@ -124,6 +141,9 @@ public void testExactMatch() { public void testRangeMatch() { long[] randomSorted = random().longs(100, Long.MIN_VALUE, Long.MAX_VALUE).toArray(); Arrays.sort(randomSorted); + DimensionFilterMapper dimensionFilterMapper = DimensionFilterMapper.Factory.fromMappedFieldType( + new NumberFieldMapper.NumberFieldType("fieldName", NumberFieldMapper.NumberType.LONG) + ); for (boolean createStarNode : new boolean[] { true, false }) { for (boolean createNullNode : new boolean[] { true, false }) { createStarTreeForDimension( @@ -136,43 +156,43 @@ public void testRangeMatch() { ArrayBasedCollector collector; // Whole range collector = new ArrayBasedCollector(); - fixedLengthStarTreeNode.collectChildrenInRange(-20, 26, collector); + fixedLengthStarTreeNode.collectChildrenInRange(-20, 26, collector, dimensionFilterMapper); result &= collector.matchAllCollectedValues(new long[] { -10, -1, 1, 2, 5, 9, 25 }); // Subset matched from left collector = new ArrayBasedCollector(); - fixedLengthStarTreeNode.collectChildrenInRange(-2, 1, collector); + fixedLengthStarTreeNode.collectChildrenInRange(-2, 1, collector, dimensionFilterMapper); result &= collector.matchAllCollectedValues(new long[] { -1, 1 }); // Subset matched from right collector = new ArrayBasedCollector(); - fixedLengthStarTreeNode.collectChildrenInRange(6, 100, collector); + fixedLengthStarTreeNode.collectChildrenInRange(6, 100, collector, dimensionFilterMapper); result &= collector.matchAllCollectedValues(new long[] { 9, 25 }); // No match on left collector = new ArrayBasedCollector(); - fixedLengthStarTreeNode.collectChildrenInRange(-30, -20, collector); + fixedLengthStarTreeNode.collectChildrenInRange(-30, -20, collector, dimensionFilterMapper); result &= collector.collectedNodeCount() == 0; // No match on right collector = new ArrayBasedCollector(); - fixedLengthStarTreeNode.collectChildrenInRange(30, 50, collector); + fixedLengthStarTreeNode.collectChildrenInRange(30, 50, collector, dimensionFilterMapper); result &= collector.collectedNodeCount() == 0; // Low > High collector = new ArrayBasedCollector(); - fixedLengthStarTreeNode.collectChildrenInRange(50, 10, collector); + fixedLengthStarTreeNode.collectChildrenInRange(50, 10, 
collector, dimensionFilterMapper); result &= collector.collectedNodeCount() == 0; // Match leftmost collector = new ArrayBasedCollector(); - fixedLengthStarTreeNode.collectChildrenInRange(-30, -10, collector); + fixedLengthStarTreeNode.collectChildrenInRange(-30, -10, collector, dimensionFilterMapper); result &= collector.matchAllCollectedValues(new long[] { -10 }); // Match rightmost collector = new ArrayBasedCollector(); - fixedLengthStarTreeNode.collectChildrenInRange(10, 25, collector); + fixedLengthStarTreeNode.collectChildrenInRange(10, 25, collector, dimensionFilterMapper); result &= collector.matchAllCollectedValues(new long[] { 25 }); // Match contains interval which has nothing collector = new ArrayBasedCollector(); - fixedLengthStarTreeNode.collectChildrenInRange(10, 24, collector); + fixedLengthStarTreeNode.collectChildrenInRange(10, 24, collector, dimensionFilterMapper); result &= collector.collectedNodeCount() == 0; // Match contains interval which has nothing collector = new ArrayBasedCollector(); - fixedLengthStarTreeNode.collectChildrenInRange(6, 24, collector); + fixedLengthStarTreeNode.collectChildrenInRange(6, 24, collector, dimensionFilterMapper); result &= collector.matchAllCollectedValues(new long[] { 9 }); return result; } catch (IOException e) { @@ -187,7 +207,7 @@ public void testRangeMatch() { try { ArrayBasedCollector collector = new ArrayBasedCollector(); long low = randomLong(), high = randomLong(); - fixedLengthStarTreeNode.collectChildrenInRange(low, high, collector); + fixedLengthStarTreeNode.collectChildrenInRange(low, high, collector, dimensionFilterMapper); if (low < high) { Long lowValue = treeSet.ceiling(low); if (lowValue != null) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/BigIntegerField.java b/server/src/test/java/org/opensearch/search/aggregations/startree/BigIntegerField.java new file mode 100644 index 0000000000000..d4733379579d6 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/BigIntegerField.java @@ -0,0 +1,100 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ +package org.opensearch.search.aggregations.startree; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredValue; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; + +import java.math.BigInteger; + +public final class BigIntegerField extends Field { + + private static final FieldType FIELD_TYPE = new FieldType(); + private static final FieldType FIELD_TYPE_STORED; + public static final int BYTES = 16; + + static { + FIELD_TYPE.setDimensions(1, 16); + FIELD_TYPE.setDocValuesType(DocValuesType.SORTED_NUMERIC); + FIELD_TYPE.freeze(); + + FIELD_TYPE_STORED = new FieldType(FIELD_TYPE); + FIELD_TYPE_STORED.setStored(true); + FIELD_TYPE_STORED.freeze(); + } + + private final StoredValue storedValue; + + /** + * Creates a new BigIntegerField, indexing the provided point, storing it as a DocValue, and optionally + * storing it as a stored field. + * + * @param name field name + * @param value the BigInteger value + * @param stored whether to store the field + * @throws IllegalArgumentException if the field name or value is null. 
+ */ + public BigIntegerField(String name, BigInteger value, Field.Store stored) { + super(name, stored == Field.Store.YES ? FIELD_TYPE_STORED : FIELD_TYPE); + fieldsData = value; + if (stored == Field.Store.YES) { + storedValue = new StoredValue(value.longValue()); + } else { + storedValue = null; + } + } + + @Override + public BytesRef binaryValue() { + return pack((BigInteger) fieldsData); + } + + @Override + public StoredValue storedValue() { + return storedValue; + } + + @Override + public void setLongValue(long value) { + super.setLongValue(value); + if (storedValue != null) { + storedValue.setLongValue(value); + } + } + + @Override + public String toString() { + return getClass().getSimpleName() + " <" + name + ':' + fieldsData + '>'; + } + + private static BytesRef pack(BigInteger... point) { + if (point == null) { + throw new IllegalArgumentException("point must not be null"); + } + if (point.length == 0) { + throw new IllegalArgumentException("point must not be 0 dimensions"); + } + byte[] packed = new byte[point.length * BYTES]; + + for (int dim = 0; dim < point.length; dim++) { + encodeDimension(point[dim], packed, dim * BYTES); + } + + return new BytesRef(packed); + } + + /** Encode single BigInteger dimension */ + public static void encodeDimension(BigInteger value, byte[] dest, int offset) { + NumericUtils.bigIntToSortableBytes(value, BYTES, dest, offset); + } + +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java index 0f0db9907d381..4555382700f21 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java @@ -78,6 +78,7 @@ import org.junit.Before; import java.io.IOException; +import java.math.BigInteger; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedHashMap; @@ -92,6 +93,7 @@ import java.util.stream.Stream; import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.objectToUnsignedLong; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.count; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -147,7 +149,14 @@ public void testStarTreeDocValues() throws IOException { new DimensionFieldData("long_field", () -> random().nextInt(50), DimensionTypes.LONG), new DimensionFieldData("half_float_field", () -> random().nextFloat(50), DimensionTypes.HALF_FLOAT), new DimensionFieldData("float_field", () -> random().nextFloat(50), DimensionTypes.FLOAT), - new DimensionFieldData("double_field", () -> random().nextDouble(50), DimensionTypes.DOUBLE) + new DimensionFieldData("double_field", () -> random().nextDouble(50), DimensionTypes.DOUBLE), + new DimensionFieldData("unsigned_long_field", () -> { + long queryValue = randomBoolean() + ? 
9223372036854775807L - random().nextInt(100000) + : -9223372036854775808L + random().nextInt(100000); + + return objectToUnsignedLong(asUnsignedDecimalString(queryValue), false); + }, DimensionTypes.UNSIGNED_LONG) ); for (Supplier maxLeafDocsSupplier : MAX_LEAF_DOC_VARIATIONS) { testStarTreeDocValuesInternal( @@ -625,6 +634,17 @@ public MappedFieldType getMappedField(String fieldName) { public Dimension getDimension(String fieldName) { return new OrdinalDimension(fieldName); } + }), + UNSIGNED_LONG(new NumericDimensionFieldDataSupplier() { + @Override + NumberFieldMapper.NumberType numberType() { + return NumberFieldMapper.NumberType.UNSIGNED_LONG; + } + + @Override + public IndexableField getField(String fieldName, Supplier valueSupplier) { + return new BigIntegerField(fieldName, (BigInteger) valueSupplier.get(), Field.Store.YES); + } }); private final DimensionFieldDataSupplier dimensionFieldDataSupplier; @@ -639,4 +659,12 @@ public DimensionFieldDataSupplier getFieldDataSupplier() { } + private String asUnsignedDecimalString(long l) { + BigInteger b = BigInteger.valueOf(l); + if (b.signum() < 0) { + b = b.add(BigInteger.ONE.shiftLeft(64)); + } + return b.toString(); + } + } From 4d0ac04caa85567a8a5fb3c850e41a1349624587 Mon Sep 17 00:00:00 2001 From: Gulshan <71965388+kumargu@users.noreply.github.com> Date: Fri, 11 Apr 2025 16:53:44 +0530 Subject: [PATCH 199/550] [Java Agent Policy Parser] Skip evaluation of AttachPermission (#17896) Signed-off-by: Gulshan Kumar --- .../main/java/org/opensearch/secure_sm/policy/PolicyFile.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java index eaae59f35c4aa..a259aa667356d 100644 --- a/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java +++ b/libs/agent-sm/agent-policy/src/main/java/org/opensearch/secure_sm/policy/PolicyFile.java @@ -49,7 +49,8 @@ public class PolicyFile extends java.security.Policy { "org.bouncycastle.crypto.CryptoServicesPermission", "org.opensearch.script.ClassPermission", "javax.security.auth.AuthPermission", - "javax.security.auth.kerberos.ServicePermission" + "javax.security.auth.kerberos.ServicePermission", + "com.sun.tools.attach.AttachPermission" ); private final PolicyInfo policyInfo; From a4bc4bfb7b372227c2b15fdee1583809113045c2 Mon Sep 17 00:00:00 2001 From: Karen X Date: Fri, 11 Apr 2025 11:05:20 -0400 Subject: [PATCH 200/550] [GRPC] Update GRPC APIs to use latest opensearch-protobufs:0.3.0 jar and various javadocs fixes (#17895) Signed-off-by: Karen Xu --- CHANGELOG.md | 1 + plugins/transport-grpc/build.gradle | 2 +- .../licenses/protobufs-0.2.0.jar.sha1 | 1 - .../licenses/protobufs-0.3.0.jar.sha1 | 1 + .../request/common/ObjectMapProtoUtils.java | 24 ++-- .../request/common/OpTypeProtoUtils.java | 44 +++++++ .../request/common/RefreshProtoUtils.java | 44 +++++++ .../bulk/ActiveShardCountProtoUtils.java | 31 ++--- .../bulk/BulkRequestParserProtoUtils.java | 45 ++----- .../document/bulk/BulkRequestProtoUtils.java | 35 ++---- .../query/AbstractQueryBuilderProtoUtils.java | 20 ++-- .../search/sort/SortOrderProtoUtils.java | 47 +------- .../response/common/FieldValueProtoUtils.java | 13 +- .../common/DocumentFieldProtoUtils.java | 12 +- .../common/VersionTypeProtoUtils.java | 41 +++++++ .../document/get/GetResultProtoUtils.java | 6 +- .../search/ProtoActionsProtoUtils.java | 1 - 
.../response/search/SearchHitProtoUtils.java | 15 ++- .../request/common/OpTypeProtoUtilsTests.java | 54 +++++++++ .../common/RefreshProtoUtilsTests.java | 64 ++++++++++ .../bulk/ActiveShardCountProtoUtilsTests.java | 67 ++--------- .../BulkRequestParserProtoUtilsTests.java | 5 +- .../bulk/BulkRequestProtoUtilsTests.java | 112 ++++++++++-------- .../sort/FieldSortBuilderProtoUtilsTests.java | 8 +- .../search/sort/SortOrderProtoUtilsTests.java | 21 ++-- .../common/DocumentFieldProtoUtilsTests.java | 8 +- .../common/VersionTypeProtoUtilsTests.java | 48 ++++++++ 27 files changed, 474 insertions(+), 296 deletions(-) delete mode 100644 plugins/transport-grpc/licenses/protobufs-0.2.0.jar.sha1 create mode 100644 plugins/transport-grpc/licenses/protobufs-0.3.0.jar.sha1 create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/OpTypeProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/RefreshProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/VersionTypeProtoUtils.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/OpTypeProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/RefreshProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/VersionTypeProtoUtilsTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 402a030430edb..57ba38c19aa0d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.api.grpc:proto-google-iam-v1` from 1.33.0 to 1.49.1 ([#17811](https://github.com/opensearch-project/OpenSearch/pull/17811)) - Bump `com.azure:azure-core` from 1.54.1 to 1.55.3 ([#17810](https://github.com/opensearch-project/OpenSearch/pull/17810)) - Bump `org.apache.poi` version from 5.2.5 to 5.4.1 in /plugins/ingest-attachment ([#17887](https://github.com/opensearch-project/OpenSearch/pull/17887)) +- Bump `org.opensearch:protobufs` from 0.2.0 to 0.3.0 ([#17888](https://github.com/opensearch-project/OpenSearch/pull/17888)) ### Changed diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle index 12cbf0ecf76cf..2e5db8116ee63 100644 --- a/plugins/transport-grpc/build.gradle +++ b/plugins/transport-grpc/build.gradle @@ -35,7 +35,7 @@ dependencies { implementation "io.grpc:grpc-stub:${versions.grpc}" implementation "io.grpc:grpc-util:${versions.grpc}" implementation "io.perfmark:perfmark-api:0.26.0" - implementation "org.opensearch:protobufs:0.2.0" + implementation "org.opensearch:protobufs:0.3.0" testImplementation project(':test:framework') } diff --git a/plugins/transport-grpc/licenses/protobufs-0.2.0.jar.sha1 b/plugins/transport-grpc/licenses/protobufs-0.2.0.jar.sha1 deleted file mode 100644 index 3fe6d0cdbabd4..0000000000000 --- a/plugins/transport-grpc/licenses/protobufs-0.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a29095657b4a0f9b59659d71e7e540e9b07fd044 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/protobufs-0.3.0.jar.sha1 b/plugins/transport-grpc/licenses/protobufs-0.3.0.jar.sha1 new file mode 100644 index 0000000000000..319d6aa6545c2 --- /dev/null +++ 
b/plugins/transport-grpc/licenses/protobufs-0.3.0.jar.sha1 @@ -0,0 +1 @@ +5e22ed37e4535c9c9cfeb8993f5294ba1201795c \ No newline at end of file diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java index 8efc057ec4cf1..be4089058f7a6 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/ObjectMapProtoUtils.java @@ -16,7 +16,9 @@ import java.util.Map; /** - * Utility class for converting ObjectMap Protobuf type to a Java object. + * Utility class for converting ObjectMap Protocol Buffer types to standard Java objects. + * This class provides methods to transform Protocol Buffer representations of object maps + * into their corresponding Java Map, List, and primitive type equivalents. */ public class ObjectMapProtoUtils { @@ -25,11 +27,12 @@ private ObjectMapProtoUtils() { } /** - * Converts a ObjectMap to Java POJO representation. - * Similar to {@link XContentParser#map()} + * Converts a Protocol Buffer ObjectMap to a Java Map representation. + * Similar to {@link XContentParser#map()}, this method transforms the structured + * Protocol Buffer data into a standard Java Map with appropriate value types. * - * @param objectMap The generic protobuf objectMap to convert - * @return A Protobuf builder .google.protobuf.Struct representation + * @param objectMap The Protocol Buffer ObjectMap to convert + * @return A Java Map containing the key-value pairs from the Protocol Buffer ObjectMap */ public static Map fromProto(ObjectMap objectMap) { @@ -43,11 +46,14 @@ public static Map fromProto(ObjectMap objectMap) { } /** - * Converts a ObjectMap.Value to Java POJO representation. - * Similar to {@link XContentParser#map()} + * Converts a Protocol Buffer ObjectMap.Value to an appropriate Java object representation. + * This method handles various value types (numbers, strings, booleans, lists, nested maps) + * and converts them to their Java equivalents. * - * @param value The generic protobuf ObjectMap.Value to convert - * @return A Protobuf builder .google.protobuf.Struct representation + * @param value The Protocol Buffer ObjectMap.Value to convert + * @return A Java object representing the value (could be a primitive type, String, List, or Map) + * @throws UnsupportedOperationException if the value is null, which cannot be added to a Java map + * @throws IllegalArgumentException if the value type cannot be converted */ public static Object fromProto(ObjectMap.Value value) { if (value.hasNullValue()) { diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/OpTypeProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/OpTypeProtoUtils.java new file mode 100644 index 0000000000000..c47c0eafb18da --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/OpTypeProtoUtils.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.common; + +import org.opensearch.action.DocWriteRequest; +import org.opensearch.protobufs.OpType; + +/** + * Utility class for converting OpType Protocol Buffers to OpenSearch DocWriteRequest.OpType objects. + * This class handles the conversion of Protocol Buffer representations to their + * corresponding OpenSearch operation type enumerations. + */ +public class OpTypeProtoUtils { + + private OpTypeProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer OpType to its corresponding OpenSearch DocWriteRequest.OpType. + * Similar to {@link DocWriteRequest.OpType}. + * + * @param opType The Protocol Buffer OpType to convert + * @return The corresponding OpenSearch DocWriteRequest.OpType + * @throws UnsupportedOperationException if the operation type is not supported + */ + public static DocWriteRequest.OpType fromProto(OpType opType) { + + switch (opType) { + case OP_TYPE_CREATE: + return DocWriteRequest.OpType.CREATE; + case OP_TYPE_INDEX: + return DocWriteRequest.OpType.INDEX; + default: + throw new UnsupportedOperationException("Invalid optype: " + opType); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/RefreshProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/RefreshProtoUtils.java new file mode 100644 index 0000000000000..6d1aecdc317e9 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/common/RefreshProtoUtils.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.request.common; + +import org.opensearch.action.support.WriteRequest; + +/** + * Utility class for converting Refresh Protocol Buffers to OpenSearch WriteRequest.RefreshPolicy values. + * This class handles the conversion of Protocol Buffer refresh policy representations to their + * corresponding OpenSearch refresh policy string values. + */ +public class RefreshProtoUtils { + + private RefreshProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer Refresh enum to its corresponding OpenSearch refresh policy string value. + * This method maps the gRPC protocol buffer refresh policy values to the internal + * OpenSearch WriteRequest.RefreshPolicy string values. 
+ * + * @param refresh The Protocol Buffer Refresh enum to convert + * @return The corresponding OpenSearch refresh policy string value + */ + public static String getRefreshPolicy(org.opensearch.protobufs.Refresh refresh) { + switch (refresh) { + case REFRESH_TRUE: + return WriteRequest.RefreshPolicy.IMMEDIATE.getValue(); + case REFRESH_WAIT_FOR: + return WriteRequest.RefreshPolicy.WAIT_UNTIL.getValue(); + case REFRESH_FALSE: + case REFRESH_UNSPECIFIED: + default: + return WriteRequest.RefreshPolicy.NONE.getValue(); + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java index df337be9f5e84..94e816cb38f45 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtils.java @@ -9,7 +9,6 @@ package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.protobufs.BulkRequest; import org.opensearch.protobufs.WaitForActiveShards; /** @@ -23,7 +22,7 @@ public class ActiveShardCountProtoUtils { /** * Private constructor to prevent instantiation of utility class. */ - protected ActiveShardCountProtoUtils() { + private ActiveShardCountProtoUtils() { // Utility class, no instances } @@ -33,37 +32,25 @@ protected ActiveShardCountProtoUtils() { * the wait_for_active_shards parameter from the Protocol Buffer request and applies * the appropriate ActiveShardCount setting to the OpenSearch bulk request. 
* - * @param bulkRequest The OpenSearch bulk request to modify - * @param request The Protocol Buffer request containing the active shard count settings - * @return The modified OpenSearch bulk request with updated active shard count settings + * @param waitForActiveShards The protobuf object containing the active shard count + * @return The corresponding OpenSearch ActiveShardCount */ - public static org.opensearch.action.bulk.BulkRequest getActiveShardCount( - org.opensearch.action.bulk.BulkRequest bulkRequest, - BulkRequest request - ) { - if (!request.hasWaitForActiveShards()) { - return bulkRequest; - } - WaitForActiveShards waitForActiveShards = request.getWaitForActiveShards(); + public static ActiveShardCount parseProto(WaitForActiveShards waitForActiveShards) { + switch (waitForActiveShards.getWaitForActiveShardsCase()) { case WaitForActiveShards.WaitForActiveShardsCase.WAIT_FOR_ACTIVE_SHARD_OPTIONS: switch (waitForActiveShards.getWaitForActiveShardOptions()) { case WAIT_FOR_ACTIVE_SHARD_OPTIONS_UNSPECIFIED: throw new UnsupportedOperationException("No mapping for WAIT_FOR_ACTIVE_SHARD_OPTIONS_UNSPECIFIED"); case WAIT_FOR_ACTIVE_SHARD_OPTIONS_ALL: - bulkRequest.waitForActiveShards(ActiveShardCount.ALL); - break; + return ActiveShardCount.ALL; default: - bulkRequest.waitForActiveShards(ActiveShardCount.DEFAULT); - break; + return ActiveShardCount.DEFAULT; } - break; case WaitForActiveShards.WaitForActiveShardsCase.INT32_VALUE: - bulkRequest.waitForActiveShards(waitForActiveShards.getInt32Value()); - break; + return ActiveShardCount.from(waitForActiveShards.getInt32Value()); default: - throw new UnsupportedOperationException("No mapping for WAIT_FOR_ACTIVE_SHARD_OPTIONS_UNSPECIFIED"); + return ActiveShardCount.DEFAULT; } - return bulkRequest; } } diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtils.java index b78d7d305b868..a200763f68f42 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtils.java @@ -23,11 +23,13 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.plugin.transport.grpc.proto.request.common.FetchSourceContextProtoUtils; import org.opensearch.plugin.transport.grpc.proto.request.common.ScriptProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.document.common.VersionTypeProtoUtils; import org.opensearch.protobufs.BulkRequest; import org.opensearch.protobufs.BulkRequestBody; import org.opensearch.protobufs.CreateOperation; import org.opensearch.protobufs.DeleteOperation; import org.opensearch.protobufs.IndexOperation; +import org.opensearch.protobufs.OpType; import org.opensearch.protobufs.UpdateOperation; import org.opensearch.script.Script; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -110,7 +112,7 @@ public static DocWriteRequest[] getDocWriteRequests( String id = null; String routing = valueOrDefault(defaultRouting, request.getRouting()); FetchSourceContext fetchSourceContext = defaultFetchSourceContext; - IndexOperation.OpType opType = null; + OpType opType = null; long version = Versions.MATCH_ANY; VersionType versionType = VersionType.INTERNAL; long ifSeqNo = 
SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -226,17 +228,8 @@ public static IndexRequest buildCreateRequest( routing = createOperation.hasRouting() ? createOperation.getRouting() : routing; version = createOperation.hasVersion() ? createOperation.getVersion() : version; if (createOperation.hasVersionType()) { - switch (createOperation.getVersionType()) { - case VERSION_TYPE_EXTERNAL: - versionType = VersionType.EXTERNAL; - break; - case VERSION_TYPE_EXTERNAL_GTE: - versionType = VersionType.EXTERNAL_GTE; - break; - default: - versionType = VersionType.INTERNAL; - break; - } + versionType = VersionTypeProtoUtils.fromProto(createOperation.getVersionType()); + } pipeline = createOperation.hasPipeline() ? createOperation.getPipeline() : pipeline; ifSeqNo = createOperation.hasIfSeqNo() ? createOperation.getIfSeqNo() : ifSeqNo; @@ -276,7 +269,7 @@ public static IndexRequest buildCreateRequest( public static IndexRequest buildIndexRequest( IndexOperation indexOperation, byte[] document, - IndexOperation.OpType opType, + OpType opType, String index, String id, String routing, @@ -293,17 +286,7 @@ public static IndexRequest buildIndexRequest( routing = indexOperation.hasRouting() ? indexOperation.getRouting() : routing; version = indexOperation.hasVersion() ? indexOperation.getVersion() : version; if (indexOperation.hasVersionType()) { - switch (indexOperation.getVersionType()) { - case VERSION_TYPE_EXTERNAL: - versionType = VersionType.EXTERNAL; - break; - case VERSION_TYPE_EXTERNAL_GTE: - versionType = VersionType.EXTERNAL_GTE; - break; - default: - versionType = VersionType.INTERNAL; - break; - } + versionType = VersionTypeProtoUtils.fromProto(indexOperation.getVersionType()); } pipeline = indexOperation.hasPipeline() ? indexOperation.getPipeline() : pipeline; ifSeqNo = indexOperation.hasIfSeqNo() ? indexOperation.getIfSeqNo() : ifSeqNo; @@ -326,7 +309,7 @@ public static IndexRequest buildIndexRequest( .routing(routing) .version(version) .versionType(versionType) - .create(opType.equals(IndexOperation.OpType.OP_TYPE_CREATE)) + .create(opType.equals(OpType.OP_TYPE_CREATE)) .setPipeline(pipeline) .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) @@ -487,17 +470,7 @@ public static DeleteRequest buildDeleteRequest( routing = deleteOperation.hasRouting() ? deleteOperation.getRouting() : routing; version = deleteOperation.hasVersion() ? deleteOperation.getVersion() : version; if (deleteOperation.hasVersionType()) { - switch (deleteOperation.getVersionType()) { - case VERSION_TYPE_EXTERNAL: - versionType = VersionType.EXTERNAL; - break; - case VERSION_TYPE_EXTERNAL_GTE: - versionType = VersionType.EXTERNAL_GTE; - break; - default: - versionType = VersionType.INTERNAL; - break; - } + versionType = VersionTypeProtoUtils.fromProto(deleteOperation.getVersionType()); } ifSeqNo = deleteOperation.hasIfSeqNo() ? deleteOperation.getIfSeqNo() : ifSeqNo; ifPrimaryTerm = deleteOperation.hasIfPrimaryTerm() ? 
deleteOperation.getIfPrimaryTerm() : ifPrimaryTerm; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtils.java index d3e3824dde3dd..2cdfedd4d94ad 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtils.java @@ -9,8 +9,8 @@ package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; import org.opensearch.action.bulk.BulkShardRequest; -import org.opensearch.action.support.WriteRequest; import org.opensearch.plugin.transport.grpc.proto.request.common.FetchSourceContextProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.request.common.RefreshProtoUtils; import org.opensearch.protobufs.BulkRequest; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.document.RestBulkAction; @@ -27,13 +27,13 @@ public class BulkRequestProtoUtils { /** * Private constructor to prevent instantiation of utility class. */ - protected BulkRequestProtoUtils() { + private BulkRequestProtoUtils() { // Utility class, no instances } /** * Prepare the request for execution. - * Similar to {@link RestBulkAction#prepareRequest(RestRequest, NodeClient)} ()} + * Similar to {@link RestBulkAction#prepareRequest(RestRequest, NodeClient)} * Please ensure to keep both implementations consistent. * * @param request the request to execute @@ -47,8 +47,9 @@ public static org.opensearch.action.bulk.BulkRequest prepareRequest(BulkRequest FetchSourceContext defaultFetchSourceContext = FetchSourceContextProtoUtils.parseFromProtoRequest(request); String defaultPipeline = request.hasPipeline() ? request.getPipeline() : null; - bulkRequest = ActiveShardCountProtoUtils.getActiveShardCount(bulkRequest, request); - + if (request.hasWaitForActiveShards()) { + bulkRequest.waitForActiveShards(ActiveShardCountProtoUtils.parseProto(request.getWaitForActiveShards())); + } Boolean defaultRequireAlias = request.hasRequireAlias() ? request.getRequireAlias() : null; if (request.hasTimeout()) { @@ -57,7 +58,7 @@ public static org.opensearch.action.bulk.BulkRequest prepareRequest(BulkRequest bulkRequest.timeout(BulkShardRequest.DEFAULT_TIMEOUT); } - bulkRequest.setRefreshPolicy(getRefreshPolicy(request)); + bulkRequest.setRefreshPolicy(RefreshProtoUtils.getRefreshPolicy(request.getRefresh())); // Note: batch_size is deprecated in OS 3.x. Add batch_size parameter when backporting to OS 2.x /* @@ -80,26 +81,4 @@ public static org.opensearch.action.bulk.BulkRequest prepareRequest(BulkRequest return bulkRequest; } - - /** - * Extracts the refresh policy from the bulk request. 
- * - * @param request The bulk request containing the refresh policy - * @return The refresh policy as a string, or null if not specified - */ - public static String getRefreshPolicy(org.opensearch.protobufs.BulkRequest request) { - if (!request.hasRefresh()) { - return null; - } - switch (request.getRefresh()) { - case REFRESH_TRUE: - return WriteRequest.RefreshPolicy.IMMEDIATE.getValue(); - case REFRESH_WAIT_FOR: - return WriteRequest.RefreshPolicy.WAIT_UNTIL.getValue(); - case REFRESH_FALSE: - case REFRESH_UNSPECIFIED: - default: - return WriteRequest.RefreshPolicy.NONE.getValue(); - } - } } diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java index 5e4d4ac778bdb..92c0985da2a21 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java @@ -12,10 +12,10 @@ import org.opensearch.index.query.QueryBuilder; import org.opensearch.protobufs.QueryContainer; -import java.io.IOException; - /** - * Utility class for converting AbstractQueryBuilder Protocol Buffers to objects. + * Utility class for converting Protocol Buffer query representations to OpenSearch QueryBuilder objects. + * This class provides methods to parse different types of query containers and transform them + * into their corresponding OpenSearch QueryBuilder implementations for search operations. */ public class AbstractQueryBuilderProtoUtils { @@ -24,14 +24,16 @@ private AbstractQueryBuilderProtoUtils() { } /** - * Parse a query from its Protocol Buffer representation - * Similar to {@link AbstractQueryBuilder#parseInnerQueryBuilder(XContentParser)} + * Parse a query from its Protocol Buffer representation. + * Similar to {@link AbstractQueryBuilder#parseInnerQueryBuilder(XContentParser)}, this method + * determines the query type from the Protocol Buffer container and delegates to the appropriate + * specialized parser. 
* - * @param queryContainer The Protocol Buffer query container - * @return A QueryBuilder instance - * @throws IOException if there's an error during parsing + * @param queryContainer The Protocol Buffer query container that holds various query type options + * @return A QueryBuilder instance configured according to the input query parameters + * @throws UnsupportedOperationException if the query type is not supported */ - public static QueryBuilder parseInnerQueryBuilderProto(QueryContainer queryContainer) throws IOException { + public static QueryBuilder parseInnerQueryBuilderProto(QueryContainer queryContainer) throws UnsupportedOperationException { QueryBuilder result; if (queryContainer.hasMatchAll()) { diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtils.java index 6dc40dd5b700b..9a839acb1cf04 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtils.java @@ -7,9 +7,6 @@ */ package org.opensearch.plugin.transport.grpc.proto.request.search.sort; -import org.opensearch.protobufs.GeoDistanceSort; -import org.opensearch.protobufs.ScoreSort; -import org.opensearch.protobufs.ScriptSort; import org.opensearch.search.sort.SortOrder; /** @@ -32,49 +29,7 @@ private SortOrderProtoUtils() { * @return The corresponding OpenSearch SortOrder * @throws IllegalArgumentException if the sort order is unspecified or invalid */ - public static SortOrder fromProto(ScoreSort.SortOrder sortOrder) { - switch (sortOrder) { - case SORT_ORDER_ASC: - return SortOrder.ASC; - case SORT_ORDER_DESC: - return SortOrder.DESC; - case SORT_ORDER_UNSPECIFIED: - default: - throw new IllegalArgumentException("Must provide oneof sort combinations"); - } - } - - /** - * Converts a Protocol Buffer GeoDistanceSort.SortOrder to an OpenSearch SortOrder. - * Similar to {@link SortOrder#fromString(String)}, this method maps the Protocol Buffer - * sort order enum values to their corresponding OpenSearch SortOrder values. - * - * @param sortOrder The Protocol Buffer GeoDistanceSort.SortOrder to convert - * @return The corresponding OpenSearch SortOrder - * @throws IllegalArgumentException if the sort order is unspecified or invalid - */ - public static SortOrder fromProto(GeoDistanceSort.SortOrder sortOrder) { - switch (sortOrder) { - case SORT_ORDER_ASC: - return SortOrder.ASC; - case SORT_ORDER_DESC: - return SortOrder.DESC; - case SORT_ORDER_UNSPECIFIED: - default: - throw new IllegalArgumentException("Must provide oneof sort combinations"); - } - } - - /** - * Converts a Protocol Buffer ScriptSort.SortOrder to an OpenSearch SortOrder. - * Similar to {@link SortOrder#fromString(String)}, this method maps the Protocol Buffer - * sort order enum values to their corresponding OpenSearch SortOrder values. 
- * - * @param sortOrder The Protocol Buffer ScriptSort.SortOrder to convert - * @return The corresponding OpenSearch SortOrder - * @throws IllegalArgumentException if the sort order is unspecified or invalid - */ - public static SortOrder fromProto(ScriptSort.SortOrder sortOrder) { + public static SortOrder fromProto(org.opensearch.protobufs.SortOrder sortOrder) { switch (sortOrder) { case SORT_ORDER_ASC: return SortOrder.ASC; diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtils.java index 61524011af950..6eb9583f8c95b 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/common/FieldValueProtoUtils.java @@ -14,7 +14,9 @@ import java.util.Map; /** - * Utility class for converting generic Java objects to google.protobuf.Struct Protobuf type. + * Utility class for converting generic Java objects to Protocol Buffer FieldValue type. + * This class provides methods to transform Java objects of various types (primitives, strings, + * maps, etc.) into their corresponding Protocol Buffer representations for gRPC communication. */ public class FieldValueProtoUtils { @@ -23,10 +25,13 @@ private FieldValueProtoUtils() { } /** - * Converts a generic Java Object to its Protocol Buffer representation. + * Converts a generic Java Object to its Protocol Buffer FieldValue representation. + * This method handles various Java types (Integer, Long, Double, Float, String, Boolean, Enum, Map) + * and converts them to the appropriate FieldValue type. 
* - * @param javaObject The java object to convert - * @return A Protobuf builder .google.protobuf.Struct representation + * @param javaObject The Java object to convert + * @return A Protocol Buffer FieldValue representation of the Java object + * @throws IllegalArgumentException if the Java object type cannot be converted */ public static FieldValue toProto(Object javaObject) { FieldValue.Builder fieldValueBuilder = FieldValue.newBuilder(); diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtils.java index eedbd69a57057..5a8be5d19e77f 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtils.java @@ -7,11 +7,11 @@ */ package org.opensearch.plugin.transport.grpc.proto.response.document.common; -import com.google.protobuf.Value; import org.opensearch.common.document.DocumentField; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.transport.grpc.proto.response.common.StructProtoUtils; +import org.opensearch.plugin.transport.grpc.proto.response.common.ObjectMapProtoUtils; +import org.opensearch.protobufs.ObjectMap; import java.util.List; @@ -33,8 +33,8 @@ private DocumentFieldProtoUtils() { * @param fieldValues The list of DocumentField values to convert * @return A Protobuf Value representation */ - public static Value toProto(List fieldValues) { - return StructProtoUtils.toProto(fieldValues); + public static ObjectMap.Value toProto(List fieldValues) { + return ObjectMapProtoUtils.toProto(fieldValues); } /** @@ -44,8 +44,8 @@ public static Value toProto(List fieldValues) { * @param fieldValue The DocumentField value to convert * @return A Protobuf Value representation */ - public static Value toProto(Object fieldValue) { - return StructProtoUtils.toProto(fieldValue); + public static ObjectMap.Value toProto(Object fieldValue) { + return ObjectMapProtoUtils.toProto(fieldValue); } } diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/VersionTypeProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/VersionTypeProtoUtils.java new file mode 100644 index 0000000000000..2462094601f44 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/VersionTypeProtoUtils.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.transport.grpc.proto.response.document.common; + +import org.opensearch.index.VersionType; + +/** + * Utility class for converting VersionType Protocol Buffers to OpenSearch VersionType objects. + * This class handles the conversion of Protocol Buffer version type representations to their + * corresponding OpenSearch version type enumerations. 
+ */ +public class VersionTypeProtoUtils { + + private VersionTypeProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer VersionType to its corresponding OpenSearch VersionType. + * Similar to {@link VersionType#fromString(String)}. + * + * @param versionType The Protocol Buffer VersionType to convert + * @return The corresponding OpenSearch VersionType + */ + public static VersionType fromProto(org.opensearch.protobufs.VersionType versionType) { + switch (versionType) { + case VERSION_TYPE_EXTERNAL: + return VersionType.EXTERNAL; + case VERSION_TYPE_EXTERNAL_GTE: + return VersionType.EXTERNAL_GTE; + default: + return VersionType.INTERNAL; + } + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtils.java index 574179411d27c..abc071174532f 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/document/get/GetResultProtoUtils.java @@ -8,7 +8,6 @@ package org.opensearch.plugin.transport.grpc.proto.response.document.get; import com.google.protobuf.ByteString; -import com.google.protobuf.Struct; import org.opensearch.common.document.DocumentField; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -17,6 +16,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.plugin.transport.grpc.proto.response.document.common.DocumentFieldProtoUtils; import org.opensearch.protobufs.InlineGetDictUserDefined; +import org.opensearch.protobufs.ObjectMap; import org.opensearch.protobufs.ResponseItem; /** @@ -74,7 +74,7 @@ public static InlineGetDictUserDefined.Builder toProtoEmbedded(GetResult getResu } // TODO test output once GetDocument GRPC endpoint is implemented - Struct.Builder metadataFieldsBuilder = Struct.newBuilder(); + ObjectMap.Builder metadataFieldsBuilder = ObjectMap.newBuilder(); for (DocumentField field : getResult.getMetadataFields().values()) { if (field.getName().equals(IgnoredFieldMapper.NAME)) { metadataFieldsBuilder.putFields(field.getName(), DocumentFieldProtoUtils.toProto(field.getValues())); @@ -93,7 +93,7 @@ public static InlineGetDictUserDefined.Builder toProtoEmbedded(GetResult getResu } // TODO test output once GetDocument GRPC endpoint is implemented - Struct.Builder documentFieldsBuilder = Struct.newBuilder(); + ObjectMap.Builder documentFieldsBuilder = ObjectMap.newBuilder(); if (!getResult.getDocumentFields().isEmpty()) { for (DocumentField field : getResult.getDocumentFields().values()) { documentFieldsBuilder.putFields(field.getName(), DocumentFieldProtoUtils.toProto(field.getValues())); diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ProtoActionsProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ProtoActionsProtoUtils.java index 7c07af2a4357c..a46ac3879990e 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ProtoActionsProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/ProtoActionsProtoUtils.java @@ -49,6 +49,5 @@ protected static 
void buildBroadcastShardsHeader( searchResponseBodyProtoBuilder.setShards( ShardStatisticsProtoUtils.getShardStats(total, successful, skipped, failed, shardFailures) ); - } } diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtils.java index 2aa49201a28ad..cbfbffbac3cb8 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/response/search/SearchHitProtoUtils.java @@ -28,9 +28,9 @@ import java.util.Map; /** - * Utility class for converting SearchResponse objects to Protocol Buffers. - * This class handles the conversion of search operation responses to their - * Protocol Buffer representation. + * Utility class for converting SearchHit objects to Protocol Buffers. + * This class handles the conversion of search hit data to their + * Protocol Buffer representation for gRPC communication. */ public class SearchHitProtoUtils { @@ -154,6 +154,15 @@ protected static org.opensearch.protobufs.Hit toInnerProto(SearchHit hit) throws return hitBuilder.build(); } + /** + * Recursively builds a Protocol Buffer Explanation from a Lucene Explanation. + * This method converts the Lucene explanation structure, including nested details, + * into the corresponding Protocol Buffer representation. + * + * @param explanation The Lucene Explanation to convert + * @return A Protocol Buffer Explanation representation + * @throws IOException if there's an error during conversion + */ private static org.opensearch.protobufs.Explanation buildExplanation(org.apache.lucene.search.Explanation explanation) throws IOException { org.opensearch.protobufs.Explanation.Builder protoExplanationBuilder = org.opensearch.protobufs.Explanation.newBuilder(); diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/OpTypeProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/OpTypeProtoUtilsTests.java new file mode 100644 index 0000000000000..d899fd61c6602 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/OpTypeProtoUtilsTests.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.common; + +import org.opensearch.action.DocWriteRequest; +import org.opensearch.protobufs.OpType; +import org.opensearch.test.OpenSearchTestCase; + +public class OpTypeProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithOpTypeCreate() { + // Test conversion from OpType.OP_TYPE_CREATE to DocWriteRequest.OpType.CREATE + DocWriteRequest.OpType result = OpTypeProtoUtils.fromProto(OpType.OP_TYPE_CREATE); + + // Verify the result + assertEquals("OP_TYPE_CREATE should convert to DocWriteRequest.OpType.CREATE", DocWriteRequest.OpType.CREATE, result); + } + + public void testFromProtoWithOpTypeIndex() { + // Test conversion from OpType.OP_TYPE_INDEX to DocWriteRequest.OpType.INDEX + DocWriteRequest.OpType result = OpTypeProtoUtils.fromProto(OpType.OP_TYPE_INDEX); + + // Verify the result + assertEquals("OP_TYPE_INDEX should convert to DocWriteRequest.OpType.INDEX", DocWriteRequest.OpType.INDEX, result); + } + + public void testFromProtoWithOpTypeUnspecified() { + // Test conversion from OpType.OP_TYPE_UNSPECIFIED, should throw UnsupportedOperationException + UnsupportedOperationException exception = expectThrows( + UnsupportedOperationException.class, + () -> OpTypeProtoUtils.fromProto(OpType.OP_TYPE_UNSPECIFIED) + ); + + // Verify the exception message + assertTrue("Exception message should mention 'Invalid optype'", exception.getMessage().contains("Invalid optype")); + } + + public void testFromProtoWithUnrecognizedOpType() { + // Test conversion with an unrecognized OpType, should throw UnsupportedOperationException + UnsupportedOperationException exception = expectThrows( + UnsupportedOperationException.class, + () -> OpTypeProtoUtils.fromProto(OpType.UNRECOGNIZED) + ); + + // Verify the exception message + assertTrue("Exception message should mention 'Invalid optype'", exception.getMessage().contains("Invalid optype")); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/RefreshProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/RefreshProtoUtilsTests.java new file mode 100644 index 0000000000000..fe00eb5d97f14 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/common/RefreshProtoUtilsTests.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.common; + +import org.opensearch.action.support.WriteRequest; +import org.opensearch.plugin.transport.grpc.proto.request.common.RefreshProtoUtils; +import org.opensearch.protobufs.BulkRequest; +import org.opensearch.protobufs.Refresh; +import org.opensearch.test.OpenSearchTestCase; + +public class RefreshProtoUtilsTests extends OpenSearchTestCase { + + public void testGetRefreshPolicyWithRefreshTrue() { + // Call getRefreshPolicy + String refreshPolicy = RefreshProtoUtils.getRefreshPolicy(Refresh.REFRESH_TRUE); + + // Verify the result + assertEquals("Should return IMMEDIATE refresh policy", WriteRequest.RefreshPolicy.IMMEDIATE.getValue(), refreshPolicy); + } + + public void testGetRefreshPolicyWithRefreshWaitFor() { + + // Call getRefreshPolicy + String refreshPolicy = RefreshProtoUtils.getRefreshPolicy(Refresh.REFRESH_WAIT_FOR); + + // Verify the result + assertEquals("Should return WAIT_UNTIL refresh policy", WriteRequest.RefreshPolicy.WAIT_UNTIL.getValue(), refreshPolicy); + } + + public void testGetRefreshPolicyWithRefreshFalse() { + // Call getRefreshPolicy + String refreshPolicy = RefreshProtoUtils.getRefreshPolicy(Refresh.REFRESH_FALSE); + + // Verify the result + assertEquals("Should return NONE refresh policy", WriteRequest.RefreshPolicy.NONE.getValue(), refreshPolicy); + } + + public void testGetRefreshPolicyWithRefreshUnspecified() { + // Call getRefreshPolicy + String refreshPolicy = RefreshProtoUtils.getRefreshPolicy(Refresh.REFRESH_UNSPECIFIED); + + // Verify the result + assertEquals("Should return NONE refresh policy", WriteRequest.RefreshPolicy.NONE.getValue(), refreshPolicy); + } + + public void testGetRefreshPolicyWithNoRefresh() { + // Create a protobuf BulkRequest with no refresh value + BulkRequest request = BulkRequest.newBuilder().build(); + + // Call getRefreshPolicy + String refreshPolicy = RefreshProtoUtils.getRefreshPolicy(request.getRefresh()); + + // Verify the result + assertEquals("Should default to REFRESH_UNSPECIFIED", Refresh.REFRESH_UNSPECIFIED, request.getRefresh()); + assertEquals("Should return NONE refresh policy", WriteRequest.RefreshPolicy.NONE.getValue(), refreshPolicy); + } + +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtilsTests.java index b0e5c9c28d40e..00baad33957a6 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtilsTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/ActiveShardCountProtoUtilsTests.java @@ -8,115 +8,72 @@ package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; -import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.protobufs.WaitForActiveShards; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.plugin.transport.grpc.proto.request.document.bulk.ActiveShardCountProtoUtils.getActiveShardCount; - public class ActiveShardCountProtoUtilsTests extends OpenSearchTestCase { public void testGetActiveShardCountWithNoWaitForActiveShards() { - // Create a BulkRequest - BulkRequest bulkRequest = new BulkRequest(); - - // Create a protobuf BulkRequest with no 
wait_for_active_shards - org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder().build(); - BulkRequest result = getActiveShardCount(bulkRequest, protoRequest); + ActiveShardCount result = ActiveShardCountProtoUtils.parseProto(WaitForActiveShards.newBuilder().build()); // Verify the result - assertSame("Should return the same BulkRequest instance", bulkRequest, result); - assertEquals("Should have default active shard count", ActiveShardCount.DEFAULT, result.waitForActiveShards()); + assertEquals("Should have default active shard count", ActiveShardCount.DEFAULT, result); } public void testGetActiveShardCountWithWaitForActiveShardsAll() { - // Create a BulkRequest - BulkRequest bulkRequest = new BulkRequest(); - // Create a protobuf BulkRequest with wait_for_active_shards = ALL (value 1) WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder() .setWaitForActiveShardOptionsValue(1) // WAIT_FOR_ACTIVE_SHARD_OPTIONS_ALL = 1 .build(); - org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder() - .setWaitForActiveShards(waitForActiveShards) - .build(); - - BulkRequest result = getActiveShardCount(bulkRequest, protoRequest); + ActiveShardCount result = ActiveShardCountProtoUtils.parseProto(waitForActiveShards); // Verify the result - assertSame("Should return the same BulkRequest instance", bulkRequest, result); - assertEquals("Should have ALL active shard count", ActiveShardCount.ALL, result.waitForActiveShards()); + assertEquals("Should have ALL active shard count", ActiveShardCount.ALL, result); } public void testGetActiveShardCountWithWaitForActiveShardsDefault() { - // Create a BulkRequest - BulkRequest bulkRequest = new BulkRequest(); // Create a protobuf BulkRequest with wait_for_active_shards = DEFAULT (value 2) WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder() .setWaitForActiveShardOptionsValue(2) // WAIT_FOR_ACTIVE_SHARD_OPTIONS_DEFAULT = 2 .build(); - org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder() - .setWaitForActiveShards(waitForActiveShards) - .build(); - - BulkRequest result = getActiveShardCount(bulkRequest, protoRequest); + ActiveShardCount result = ActiveShardCountProtoUtils.parseProto(waitForActiveShards); // Verify the result - assertSame("Should return the same BulkRequest instance", bulkRequest, result); - assertEquals("Should have DEFAULT active shard count", ActiveShardCount.DEFAULT, result.waitForActiveShards()); + assertEquals("Should have DEFAULT active shard count", ActiveShardCount.DEFAULT, result); } public void testGetActiveShardCountWithWaitForActiveShardsUnspecified() { - // Create a BulkRequest - BulkRequest bulkRequest = new BulkRequest(); - // Create a protobuf BulkRequest with wait_for_active_shards = UNSPECIFIED (value 0) WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder() .setWaitForActiveShardOptionsValue(0) // WAIT_FOR_ACTIVE_SHARD_OPTIONS_UNSPECIFIED = 0 .build(); - org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder() - .setWaitForActiveShards(waitForActiveShards) - .build(); - - expectThrows(UnsupportedOperationException.class, () -> getActiveShardCount(bulkRequest, protoRequest)); + expectThrows(UnsupportedOperationException.class, () -> ActiveShardCountProtoUtils.parseProto(waitForActiveShards)); } public void testGetActiveShardCountWithWaitForActiveShardsInt32() { - // Create a BulkRequest - 
BulkRequest bulkRequest = new BulkRequest(); // Create a protobuf BulkRequest with wait_for_active_shards = 2 WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder().setInt32Value(2).build(); - org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder() - .setWaitForActiveShards(waitForActiveShards) - .build(); - - BulkRequest result = getActiveShardCount(bulkRequest, protoRequest); + ActiveShardCount result = ActiveShardCountProtoUtils.parseProto(waitForActiveShards); // Verify the result - assertSame("Should return the same BulkRequest instance", bulkRequest, result); - assertEquals("Should have active shard count of 2", ActiveShardCount.from(2), result.waitForActiveShards()); + assertEquals("Should have active shard count of 2", ActiveShardCount.from(2), result); } public void testGetActiveShardCountWithWaitForActiveShardsNoCase() { - // Create a BulkRequest - BulkRequest bulkRequest = new BulkRequest(); - // Create a protobuf BulkRequest with wait_for_active_shards but no case set WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder().build(); - org.opensearch.protobufs.BulkRequest protoRequest = org.opensearch.protobufs.BulkRequest.newBuilder() - .setWaitForActiveShards(waitForActiveShards) - .build(); + ActiveShardCount result = ActiveShardCountProtoUtils.parseProto(waitForActiveShards); - // Call getActiveShardCount, should throw UnsupportedOperationException - expectThrows(UnsupportedOperationException.class, () -> getActiveShardCount(bulkRequest, protoRequest)); + // Verify the result + assertEquals("Should have DEFAULT active shard count", ActiveShardCount.DEFAULT, result); } } diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtilsTests.java index 655b845b89ef4..b4a6a4cd22724 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtilsTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestParserProtoUtilsTests.java @@ -21,6 +21,7 @@ import org.opensearch.protobufs.CreateOperation; import org.opensearch.protobufs.DeleteOperation; import org.opensearch.protobufs.IndexOperation; +import org.opensearch.protobufs.OpType; import org.opensearch.protobufs.UpdateOperation; import org.opensearch.test.OpenSearchTestCase; @@ -128,7 +129,7 @@ public void testBuildIndexRequestWithOpType() { IndexOperation indexOperation = IndexOperation.newBuilder() .setIndex("test-index") .setId("test-id") - .setOpType(IndexOperation.OpType.OP_TYPE_CREATE) + .setOpType(OpType.OP_TYPE_CREATE) .build(); // Create document content @@ -138,7 +139,7 @@ public void testBuildIndexRequestWithOpType() { IndexRequest indexRequest = BulkRequestParserProtoUtils.buildIndexRequest( indexOperation, document, - IndexOperation.OpType.OP_TYPE_CREATE, + OpType.OP_TYPE_CREATE, "default-index", "default-id", "default-routing", diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtilsTests.java index 6312b6515ae59..e1b54ef743ace 100644 --- 
a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtilsTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/document/bulk/BulkRequestProtoUtilsTests.java @@ -8,110 +8,122 @@ package org.opensearch.plugin.transport.grpc.proto.request.document.bulk; +import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.WriteRequest; import org.opensearch.protobufs.BulkRequest; +import org.opensearch.protobufs.Refresh; +import org.opensearch.protobufs.WaitForActiveShards; import org.opensearch.test.OpenSearchTestCase; import java.text.ParseException; public class BulkRequestProtoUtilsTests extends OpenSearchTestCase { - public void testGetRefreshPolicyWithRefreshTrue() { - // Create a protobuf BulkRequest with refresh=REFRESH_TRUE (value 1) - BulkRequest request = BulkRequest.newBuilder().setRefresh(BulkRequest.Refresh.REFRESH_TRUE).build(); + public void testPrepareRequestWithBasicSettings() { + // Create a protobuf BulkRequest with basic settings + BulkRequest request = BulkRequest.newBuilder() + .setIndex("test-index") + .setRouting("test-routing") + .setRefresh(Refresh.REFRESH_TRUE) + .setTimeout("30s") + .build(); - // Call getRefreshPolicy - String refreshPolicy = BulkRequestProtoUtils.getRefreshPolicy(request); + // Call prepareRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); // Verify the result - assertEquals("Should return IMMEDIATE refresh policy", WriteRequest.RefreshPolicy.IMMEDIATE.getValue(), refreshPolicy); + assertNotNull("BulkRequest should not be null", bulkRequest); + assertEquals("Refresh policy should match", WriteRequest.RefreshPolicy.IMMEDIATE, bulkRequest.getRefreshPolicy()); + assertEquals("Timeout should match", "30s", bulkRequest.timeout().toString()); } - public void testGetRefreshPolicyWithRefreshWaitFor() { - // Create a protobuf BulkRequest with refresh=REFRESH_WAIT_FOR - BulkRequest request = BulkRequest.newBuilder().setRefresh(BulkRequest.Refresh.REFRESH_WAIT_FOR).build(); + public void testPrepareRequestWithDefaultValues() { + // Create a protobuf BulkRequest with no specific settings + BulkRequest request = BulkRequest.newBuilder().build(); - // Call getRefreshPolicy - String refreshPolicy = BulkRequestProtoUtils.getRefreshPolicy(request); + // Call prepareRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); // Verify the result - assertEquals("Should return WAIT_UNTIL refresh policy", WriteRequest.RefreshPolicy.WAIT_UNTIL.getValue(), refreshPolicy); + assertNotNull("BulkRequest should not be null", bulkRequest); + assertEquals("Should have zero requests", 0, bulkRequest.numberOfActions()); + assertEquals("Refresh policy should be NONE", WriteRequest.RefreshPolicy.NONE, bulkRequest.getRefreshPolicy()); } - public void testGetRefreshPolicyWithRefreshFalse() { - // Create a protobuf BulkRequest with refresh=REFRESH_FALSE (value 3) - BulkRequest request = BulkRequest.newBuilder().setRefresh(BulkRequest.Refresh.REFRESH_FALSE).build(); + public void testPrepareRequestWithTimeout() throws ParseException { + // Create a protobuf BulkRequest with a timeout + BulkRequest request = BulkRequest.newBuilder().setTimeout("5s").build(); - // Call getRefreshPolicy - String refreshPolicy = BulkRequestProtoUtils.getRefreshPolicy(request); + // Call prepareRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = 
BulkRequestProtoUtils.prepareRequest(request); // Verify the result - assertEquals("Should return NONE refresh policy", WriteRequest.RefreshPolicy.NONE.getValue(), refreshPolicy); + assertNotNull("BulkRequest should not be null", bulkRequest); + assertEquals("Timeout should match", "5s", bulkRequest.timeout().toString()); } - public void testGetRefreshPolicyWithRefreshUnspecified() { - // Create a protobuf BulkRequest with refresh=REFRESH_UNSPECIFIED (value 0) - BulkRequest request = BulkRequest.newBuilder().setRefresh(BulkRequest.Refresh.REFRESH_UNSPECIFIED).build(); + public void testPrepareRequestWithWaitForActiveShards() { + // Create a WaitForActiveShards with a specific count + WaitForActiveShards waitForActiveShards = WaitForActiveShards.newBuilder().setInt32Value(2).build(); - // Call getRefreshPolicy - String refreshPolicy = BulkRequestProtoUtils.getRefreshPolicy(request); + // Create a protobuf BulkRequest with wait_for_active_shards + BulkRequest request = BulkRequest.newBuilder().setWaitForActiveShards(waitForActiveShards).build(); + + // Call prepareRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); // Verify the result - assertEquals("Should return NONE refresh policy", WriteRequest.RefreshPolicy.NONE.getValue(), refreshPolicy); + assertNotNull("BulkRequest should not be null", bulkRequest); + assertEquals("Wait for active shards should match", ActiveShardCount.from(2), bulkRequest.waitForActiveShards()); } - public void testGetRefreshPolicyWithNoRefresh() { - // Create a protobuf BulkRequest with no refresh value - BulkRequest request = BulkRequest.newBuilder().build(); + public void testPrepareRequestWithRequireAlias() { + // Create a protobuf BulkRequest with require_alias set to true + BulkRequest request = BulkRequest.newBuilder().setRequireAlias(true).build(); - // Call getRefreshPolicy - String refreshPolicy = BulkRequestProtoUtils.getRefreshPolicy(request); + // Call prepareRequest + org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); // Verify the result - assertNull("Should return null refresh policy", refreshPolicy); + assertNotNull("BulkRequest should not be null", bulkRequest); + // Note: The BulkRequest doesn't expose a getter for requireAlias, so we can't directly verify it + // This test mainly ensures that setting requireAlias doesn't cause any exceptions } - public void testPrepareRequestWithBasicSettings() { - // Create a protobuf BulkRequest with basic settings - BulkRequest request = BulkRequest.newBuilder() - .setIndex("test-index") - .setRouting("test-routing") - .setRefresh(BulkRequest.Refresh.REFRESH_TRUE) - .setTimeout("30s") - .build(); + public void testPrepareRequestWithPipeline() { + // Create a protobuf BulkRequest with a pipeline + BulkRequest request = BulkRequest.newBuilder().setPipeline("test-pipeline").build(); // Call prepareRequest org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); // Verify the result assertNotNull("BulkRequest should not be null", bulkRequest); - assertEquals("Refresh policy should match", WriteRequest.RefreshPolicy.IMMEDIATE, bulkRequest.getRefreshPolicy()); + // Note: The BulkRequest doesn't expose a getter for pipeline, so we can't directly verify it + // This test mainly ensures that setting pipeline doesn't cause any exceptions } - public void testPrepareRequestWithDefaultValues() { - // Create a protobuf BulkRequest with no specific settings - BulkRequest 
request = BulkRequest.newBuilder().build(); + public void testPrepareRequestWithRefreshWait() { + // Create a protobuf BulkRequest with refresh set to WAIT_FOR + BulkRequest request = BulkRequest.newBuilder().setRefresh(Refresh.REFRESH_WAIT_FOR).build(); // Call prepareRequest org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); // Verify the result assertNotNull("BulkRequest should not be null", bulkRequest); - assertEquals("Should have zero requests", 0, bulkRequest.numberOfActions()); - assertEquals("Refresh policy should be null", WriteRequest.RefreshPolicy.NONE, bulkRequest.getRefreshPolicy()); + assertEquals("Refresh policy should be WAIT_FOR", WriteRequest.RefreshPolicy.WAIT_UNTIL, bulkRequest.getRefreshPolicy()); } - public void testPrepareRequestWithTimeout() throws ParseException { - // Create a protobuf BulkRequest with a timeout - BulkRequest request = BulkRequest.newBuilder().setTimeout("5s").build(); + public void testPrepareRequestWithRefreshFalse() { + // Create a protobuf BulkRequest with refresh set to FALSE + BulkRequest request = BulkRequest.newBuilder().setRefresh(Refresh.REFRESH_FALSE).build(); // Call prepareRequest org.opensearch.action.bulk.BulkRequest bulkRequest = BulkRequestProtoUtils.prepareRequest(request); // Verify the result assertNotNull("BulkRequest should not be null", bulkRequest); - // The timeout is set in the BulkRequest - assertEquals("Require alias should be true", "5s", bulkRequest.timeout().toString()); - + assertEquals("Refresh policy should be NONE", WriteRequest.RefreshPolicy.NONE, bulkRequest.getRefreshPolicy()); } } diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtilsTests.java index 8dbf593a0a762..42e15a38f5f97 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtilsTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/FieldSortBuilderProtoUtilsTests.java @@ -37,7 +37,7 @@ public void testFromProtoWithEmptyMap() { public void testFromProtoWithSingleField() { // Create a FieldWithOrderMap with a single field FieldWithOrderMap.Builder builder = FieldWithOrderMap.newBuilder(); - builder.putFieldWithOrderMap("field1", ScoreSort.newBuilder().setOrder(ScoreSort.SortOrder.SORT_ORDER_ASC).build()); + builder.putFieldWithOrderMap("field1", ScoreSort.newBuilder().setOrder(org.opensearch.protobufs.SortOrder.SORT_ORDER_ASC).build()); FieldWithOrderMap fieldWithOrderMap = builder.build(); // Create a list to populate @@ -57,8 +57,8 @@ public void testFromProtoWithSingleField() { public void testFromProtoWithMultipleFields() { // Create a FieldWithOrderMap with multiple fields FieldWithOrderMap.Builder builder = FieldWithOrderMap.newBuilder(); - builder.putFieldWithOrderMap("field1", ScoreSort.newBuilder().setOrder(ScoreSort.SortOrder.SORT_ORDER_ASC).build()); - builder.putFieldWithOrderMap("field2", ScoreSort.newBuilder().setOrder(ScoreSort.SortOrder.SORT_ORDER_DESC).build()); + builder.putFieldWithOrderMap("field1", ScoreSort.newBuilder().setOrder(org.opensearch.protobufs.SortOrder.SORT_ORDER_ASC).build()); + builder.putFieldWithOrderMap("field2", ScoreSort.newBuilder().setOrder(org.opensearch.protobufs.SortOrder.SORT_ORDER_DESC).build()); 
FieldWithOrderMap fieldWithOrderMap = builder.build(); // Create a list to populate @@ -94,7 +94,7 @@ public void testFromProtoWithMultipleFields() { public void testFromProtoWithScoreField() { // Create a FieldWithOrderMap with the special "score" field FieldWithOrderMap.Builder builder = FieldWithOrderMap.newBuilder(); - builder.putFieldWithOrderMap("score", ScoreSort.newBuilder().setOrder(ScoreSort.SortOrder.SORT_ORDER_DESC).build()); + builder.putFieldWithOrderMap("score", ScoreSort.newBuilder().setOrder(org.opensearch.protobufs.SortOrder.SORT_ORDER_DESC).build()); FieldWithOrderMap fieldWithOrderMap = builder.build(); // Create a list to populate diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtilsTests.java index 1f31780840057..f6842b402afb1 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtilsTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/sort/SortOrderProtoUtilsTests.java @@ -7,9 +7,6 @@ */ package org.opensearch.plugin.transport.grpc.proto.request.search.sort; -import org.opensearch.protobufs.GeoDistanceSort; -import org.opensearch.protobufs.ScoreSort; -import org.opensearch.protobufs.ScriptSort; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchTestCase; @@ -17,13 +14,13 @@ public class SortOrderProtoUtilsTests extends OpenSearchTestCase { public void testFromProtoScoreSortAsc() { // Test ASC order - SortOrder sortOrder = SortOrderProtoUtils.fromProto(ScoreSort.SortOrder.SORT_ORDER_ASC); + SortOrder sortOrder = SortOrderProtoUtils.fromProto(org.opensearch.protobufs.SortOrder.SORT_ORDER_ASC); assertEquals("Sort order should be ASC", SortOrder.ASC, sortOrder); } public void testFromProtoScoreSortDesc() { // Test DESC order - SortOrder sortOrder = SortOrderProtoUtils.fromProto(ScoreSort.SortOrder.SORT_ORDER_DESC); + SortOrder sortOrder = SortOrderProtoUtils.fromProto(org.opensearch.protobufs.SortOrder.SORT_ORDER_DESC); assertEquals("Sort order should be DESC", SortOrder.DESC, sortOrder); } @@ -31,7 +28,7 @@ public void testFromProtoScoreSortUnspecified() { // Test UNSPECIFIED order (should throw exception) IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> SortOrderProtoUtils.fromProto(ScoreSort.SortOrder.SORT_ORDER_UNSPECIFIED) + () -> SortOrderProtoUtils.fromProto(org.opensearch.protobufs.SortOrder.SORT_ORDER_UNSPECIFIED) ); assertTrue( "Exception message should mention 'Must provide oneof sort combinations'", @@ -41,13 +38,13 @@ public void testFromProtoScoreSortUnspecified() { public void testFromProtoGeoDistanceSortAsc() { // Test ASC order - SortOrder sortOrder = SortOrderProtoUtils.fromProto(GeoDistanceSort.SortOrder.SORT_ORDER_ASC); + SortOrder sortOrder = SortOrderProtoUtils.fromProto(org.opensearch.protobufs.SortOrder.SORT_ORDER_ASC); assertEquals("Sort order should be ASC", SortOrder.ASC, sortOrder); } public void testFromProtoGeoDistanceSortDesc() { // Test DESC order - SortOrder sortOrder = SortOrderProtoUtils.fromProto(GeoDistanceSort.SortOrder.SORT_ORDER_DESC); + SortOrder sortOrder = SortOrderProtoUtils.fromProto(org.opensearch.protobufs.SortOrder.SORT_ORDER_DESC); assertEquals("Sort order should be DESC", SortOrder.DESC, sortOrder); } @@ -55,7 
+52,7 @@ public void testFromProtoGeoDistanceSortUnspecified() { // Test UNSPECIFIED order (should throw exception) IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> SortOrderProtoUtils.fromProto(GeoDistanceSort.SortOrder.SORT_ORDER_UNSPECIFIED) + () -> SortOrderProtoUtils.fromProto(org.opensearch.protobufs.SortOrder.SORT_ORDER_UNSPECIFIED) ); assertTrue( "Exception message should mention 'Must provide oneof sort combinations'", @@ -65,13 +62,13 @@ public void testFromProtoGeoDistanceSortUnspecified() { public void testFromProtoScriptSortAsc() { // Test ASC order - SortOrder sortOrder = SortOrderProtoUtils.fromProto(ScriptSort.SortOrder.SORT_ORDER_ASC); + SortOrder sortOrder = SortOrderProtoUtils.fromProto(org.opensearch.protobufs.SortOrder.SORT_ORDER_ASC); assertEquals("Sort order should be ASC", SortOrder.ASC, sortOrder); } public void testFromProtoScriptSortDesc() { // Test DESC order - SortOrder sortOrder = SortOrderProtoUtils.fromProto(ScriptSort.SortOrder.SORT_ORDER_DESC); + SortOrder sortOrder = SortOrderProtoUtils.fromProto(org.opensearch.protobufs.SortOrder.SORT_ORDER_DESC); assertEquals("Sort order should be DESC", SortOrder.DESC, sortOrder); } @@ -79,7 +76,7 @@ public void testFromProtoScriptSortUnspecified() { // Test UNSPECIFIED order (should throw exception) IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> SortOrderProtoUtils.fromProto(ScriptSort.SortOrder.SORT_ORDER_UNSPECIFIED) + () -> SortOrderProtoUtils.fromProto(org.opensearch.protobufs.SortOrder.SORT_ORDER_UNSPECIFIED) ); assertTrue( "Exception message should mention 'Must provide oneof sort combinations'", diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtilsTests.java index f4ed9256b2f9a..48745de994548 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtilsTests.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/DocumentFieldProtoUtilsTests.java @@ -8,7 +8,7 @@ package org.opensearch.plugin.transport.grpc.proto.response.document.common; -import com.google.protobuf.Value; +import org.opensearch.protobufs.ObjectMap; import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; @@ -22,7 +22,7 @@ public void testToProtoWithEmptyList() { List fieldValues = Collections.emptyList(); // Convert to Protocol Buffer - Value value = DocumentFieldProtoUtils.toProto(fieldValues); + ObjectMap.Value value = DocumentFieldProtoUtils.toProto(fieldValues); // Verify the conversion assertNotNull("Value should not be null", value); @@ -33,7 +33,7 @@ public void testToProtoWithSimpleValues() { List fieldValues = Arrays.asList("value1", "value2"); // Convert to Protocol Buffer - Value value = DocumentFieldProtoUtils.toProto(fieldValues); + ObjectMap.Value value = DocumentFieldProtoUtils.toProto(fieldValues); // Verify the conversion assertNotNull("Value should not be null", value); @@ -44,7 +44,7 @@ public void testToProtoWithSimpleValues() { public void testToProtoWithNullList() { // Convert null to Protocol Buffer - Value value = DocumentFieldProtoUtils.toProto(null); + ObjectMap.Value value = DocumentFieldProtoUtils.toProto(null); // Verify the conversion 
assertNotNull("Value should not be null", value);
diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/VersionTypeProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/VersionTypeProtoUtilsTests.java
new file mode 100644
index 0000000000000..15327c16502e1
--- /dev/null
+++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/response/document/common/VersionTypeProtoUtilsTests.java
@@ -0,0 +1,48 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.transport.grpc.proto.response.document.common;
+
+import org.opensearch.index.VersionType;
+import org.opensearch.test.OpenSearchTestCase;
+
+public class VersionTypeProtoUtilsTests extends OpenSearchTestCase {
+
+ public void testFromProtoWithVersionTypeExternal() {
+ // Test conversion from VersionType.VERSION_TYPE_EXTERNAL to VersionType.EXTERNAL
+ VersionType result = VersionTypeProtoUtils.fromProto(org.opensearch.protobufs.VersionType.VERSION_TYPE_EXTERNAL);
+
+ // Verify the result
+ assertEquals("VERSION_TYPE_EXTERNAL should convert to VersionType.EXTERNAL", VersionType.EXTERNAL, result);
+ }
+
+ public void testFromProtoWithVersionTypeExternalGte() {
+ // Test conversion from VersionType.VERSION_TYPE_EXTERNAL_GTE to VersionType.EXTERNAL_GTE
+ VersionType result = VersionTypeProtoUtils.fromProto(org.opensearch.protobufs.VersionType.VERSION_TYPE_EXTERNAL_GTE);
+
+ // Verify the result
+ assertEquals("VERSION_TYPE_EXTERNAL_GTE should convert to VersionType.EXTERNAL_GTE", VersionType.EXTERNAL_GTE, result);
+ }
+
+ public void testFromProtoWithDefaultCase() {
+ // Test conversion with a default case (should return INTERNAL)
+ // Using UNSPECIFIED which will hit the default case
+ VersionType result = VersionTypeProtoUtils.fromProto(org.opensearch.protobufs.VersionType.VERSION_TYPE_UNSPECIFIED);
+
+ // Verify the result
+ assertEquals("Default case should convert to VersionType.INTERNAL", VersionType.INTERNAL, result);
+ }
+
+ public void testFromProtoWithUnrecognizedVersionType() {
+ // Test conversion with an unrecognized VersionType
+ VersionType result = VersionTypeProtoUtils.fromProto(org.opensearch.protobufs.VersionType.UNRECOGNIZED);
+
+ // Verify the result (should default to INTERNAL)
+ assertEquals("UNRECOGNIZED should default to VersionType.INTERNAL", VersionType.INTERNAL, result);
+ }
+}

From df3e64537b16170be2eca25cca26402838eb5d16 Mon Sep 17 00:00:00 2001
From: Kaushal Kumar
Date: Fri, 11 Apr 2025 10:47:56 -0700
Subject: [PATCH 201/550] [WLM] Rename queryGroup to workloadGroup (#17901)

* rename queryGroup to workloadGroup

Signed-off-by: Kaushal Kumar

* fix propagated headers

Signed-off-by: Kaushal Kumar

* add changelog entry

Signed-off-by: Kaushal Kumar

* rename queryGroup to workloadGroup

Signed-off-by: Kaushal Kumar

* resolve comments

Signed-off-by: Kaushal Kumar

---------

Signed-off-by: Kaushal Kumar
---
 CHANGELOG.md | 1 +
 .../rest/WorkloadManagementRestIT.java | 56 +-
 .../plugin/wlm/WorkloadManagementPlugin.java | 44 +-
 .../wlm/WorkloadManagementPluginModule.java | 6 +-
 ...on.java => CreateWorkloadGroupAction.java} | 16 +-
 ...t.java => CreateWorkloadGroupRequest.java} | 38 +-
 ....java => CreateWorkloadGroupResponse.java} | 32 +-
 ...on.java =>
DeleteWorkloadGroupAction.java} | 14 +- ...t.java => DeleteWorkloadGroupRequest.java} | 16 +- ...ction.java => GetWorkloadGroupAction.java} | 16 +- ...uest.java => GetWorkloadGroupRequest.java} | 18 +- ...nse.java => GetWorkloadGroupResponse.java} | 34 +- ...> TransportCreateWorkloadGroupAction.java} | 39 +- ...> TransportDeleteWorkloadGroupAction.java} | 30 +- ...a => TransportGetWorkloadGroupAction.java} | 39 +- ...> TransportUpdateWorkloadGroupAction.java} | 39 +- .../wlm/action/UpdateQueryGroupRequest.java | 83 --- ...on.java => UpdateWorkloadGroupAction.java} | 16 +- .../action/UpdateWorkloadGroupRequest.java | 83 +++ ....java => UpdateWorkloadGroupResponse.java} | 32 +- ...ava => RestCreateWorkloadGroupAction.java} | 30 +- ...ava => RestDeleteWorkloadGroupAction.java} | 30 +- ...n.java => RestGetWorkloadGroupAction.java} | 26 +- ...ava => RestUpdateWorkloadGroupAction.java} | 30 +- ...a => WorkloadGroupPersistenceService.java} | 219 ++++--- ...Utils.java => WorkloadGroupTestUtils.java} | 74 +-- ...a => CreateWorkloadGroupRequestTests.java} | 24 +- ... => CreateWorkloadGroupResponseTests.java} | 30 +- ...a => DeleteWorkloadGroupRequestTests.java} | 16 +- ...java => GetWorkloadGroupRequestTests.java} | 22 +- ...ava => GetWorkloadGroupResponseTests.java} | 88 +-- .../wlm/action/QueryGroupActionTestUtils.java | 17 - ...nsportDeleteWorkloadGroupActionTests.java} | 18 +- ...TransportGetWorkloadGroupActionTests.java} | 22 +- ...a => UpdateWorkloadGroupRequestTests.java} | 47 +- ... => UpdateWorkloadGroupResponseTests.java} | 32 +- .../action/WorkloadGroupActionTestUtils.java | 20 + ...> RestDeleteWorkloadGroupActionTests.java} | 32 +- .../QueryGroupPersistenceServiceTests.java | 515 --------------- .../WorkloadGroupPersistenceServiceTests.java | 533 +++++++++++++++ ...son => create_workload_group_context.json} | 6 +- ...son => delete_workload_group_context.json} | 6 +- ...t.json => get_workload_group_context.json} | 8 +- ...son => update_workload_group_context.json} | 8 +- ..._query_group.yml => 10_workload_group.yml} | 40 +- .../backpressure/SearchBackpressureIT.java | 4 +- .../opensearch/wlm/WorkloadManagementIT.java | 137 ++-- .../org/opensearch/action/ActionModule.java | 4 +- .../cluster/wlm/TransportWlmStatsAction.java | 10 +- .../admin/cluster/wlm/WlmStatsRequest.java | 18 +- .../admin/cluster/wlm/WlmStatsResponse.java | 6 +- .../action/search/SearchShardTask.java | 4 +- .../opensearch/action/search/SearchTask.java | 4 +- .../action/search/TransportSearchAction.java | 6 +- .../search/TransportSearchScrollAction.java | 6 +- .../java/org/opensearch/autotagging/Rule.java | 2 +- .../opensearch/autotagging/RuleValidator.java | 2 +- .../org/opensearch/cluster/ClusterModule.java | 8 +- .../opensearch/cluster/metadata/Metadata.java | 38 +- .../{QueryGroup.java => WorkloadGroup.java} | 111 ++-- ...tadata.java => WorkloadGroupMetadata.java} | 68 +- .../main/java/org/opensearch/node/Node.java | 45 +- .../admin/cluster/RestWlmStatsAction.java | 8 +- .../SearchBackpressureService.java | 14 +- .../transport/client/ClusterAdminClient.java | 2 +- ...java => MutableWorkloadGroupFragment.java} | 16 +- .../wlm/QueryGroupsStateAccessor.java | 64 -- ... 
WorkloadGroupLevelResourceUsageView.java} | 16 +- ...Service.java => WorkloadGroupService.java} | 208 +++--- ...yGroupTask.java => WorkloadGroupTask.java} | 50 +- ...oadGroupThreadContextStatePropagator.java} | 8 +- .../wlm/WorkloadGroupsStateAccessor.java | 67 ++ .../wlm/WorkloadManagementSettings.java | 50 +- ...orkloadManagementTransportInterceptor.java | 26 +- .../MaximumResourceTaskSelectionStrategy.java | 14 +- .../QueryGroupTaskCancellationService.java | 274 -------- .../cancellation/TaskSelectionStrategy.java | 4 +- .../WorkloadGroupTaskCancellationService.java | 282 ++++++++ ...orkloadGroupRequestOperationListener.java} | 24 +- .../org/opensearch/wlm/stats/WlmStats.java | 20 +- ...roupState.java => WorkloadGroupState.java} | 4 +- ...roupStats.java => WorkloadGroupStats.java} | 70 +- .../wlm/tracker/CpuUsageCalculator.java | 6 +- .../wlm/tracker/MemoryUsageCalculator.java | 6 +- ...QueryGroupResourceUsageTrackerService.java | 82 --- .../wlm/tracker/ResourceUsageCalculator.java | 8 +- ...kloadGroupResourceUsageTrackerService.java | 82 +++ .../opensearch/wlm/tracker/package-info.java | 2 +- .../cluster/wlm/WlmStatsResponseTests.java | 32 +- .../nodes/TransportWlmStatsActionTests.java | 4 +- .../cluster/ClusterModuleTests.java | 9 +- .../cluster/metadata/QueryGroupTests.java | 199 ------ ...s.java => WorkloadGroupMetadataTests.java} | 40 +- .../cluster/metadata/WorkloadGroupTests.java | 202 ++++++ .../SearchBackpressureServiceTests.java | 54 +- ...=> MutableWorkloadGroupFragmentTests.java} | 36 +- .../wlm/QueryGroupServiceTests.java | 553 ---------------- .../opensearch/wlm/QueryGroupTaskTests.java | 44 -- ...roupThreadContextStatePropagatorTests.java | 30 - ...loadGroupLevelResourceUsageViewTests.java} | 16 +- .../wlm/WorkloadGroupServiceTests.java | 553 ++++++++++++++++ .../wlm/WorkloadGroupTaskTests.java | 44 ++ ...roupThreadContextStatePropagatorTests.java | 30 + .../wlm/WorkloadManagementSettingsTests.java | 48 +- ...adManagementTransportInterceptorTests.java | 14 +- ...anagementTransportRequestHandlerTests.java | 18 +- ...mumResourceTaskSelectionStrategyTests.java | 26 +- ...ueryGroupTaskCancellationServiceTests.java | 589 ----------------- ...loadGroupTaskCancellationServiceTests.java | 612 ++++++++++++++++++ ...adGroupRequestOperationListenerTests.java} | 148 ++--- .../wlm/stats/QueryGroupStateTests.java | 73 --- .../opensearch/wlm/stats/WlmStatsTests.java | 20 +- .../wlm/stats/WorkloadGroupStateTests.java | 73 +++ ...ests.java => WorkloadGroupStatsTests.java} | 32 +- .../tracker/ResourceUsageCalculatorTests.java | 18 +- ...rceUsageCalculatorTrackerServiceTests.java | 68 +- ...rkloadGroupTaskResourceTrackingTests.java} | 26 +- 117 files changed, 4073 insertions(+), 3963 deletions(-) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{UpdateQueryGroupAction.java => CreateWorkloadGroupAction.java} (51%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{CreateQueryGroupRequest.java => CreateWorkloadGroupRequest.java} (53%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{CreateQueryGroupResponse.java => CreateWorkloadGroupResponse.java} (59%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{DeleteQueryGroupAction.java => DeleteWorkloadGroupAction.java} (61%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{DeleteQueryGroupRequest.java => DeleteWorkloadGroupRequest.java} (75%) rename 
plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{CreateQueryGroupAction.java => GetWorkloadGroupAction.java} (53%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{GetQueryGroupRequest.java => GetWorkloadGroupRequest.java} (69%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{GetQueryGroupResponse.java => GetWorkloadGroupResponse.java} (61%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{TransportUpdateQueryGroupAction.java => TransportCreateWorkloadGroupAction.java} (59%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{TransportDeleteQueryGroupAction.java => TransportDeleteWorkloadGroupAction.java} (69%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{TransportGetQueryGroupAction.java => TransportGetWorkloadGroupAction.java} (65%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{TransportCreateQueryGroupAction.java => TransportUpdateWorkloadGroupAction.java} (60%) delete mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequest.java rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{GetQueryGroupAction.java => UpdateWorkloadGroupAction.java} (51%) create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequest.java rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/{UpdateQueryGroupResponse.java => UpdateWorkloadGroupResponse.java} (60%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/{RestUpdateQueryGroupAction.java => RestCreateWorkloadGroupAction.java} (58%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/{RestDeleteQueryGroupAction.java => RestDeleteWorkloadGroupAction.java} (50%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/{RestGetQueryGroupAction.java => RestGetWorkloadGroupAction.java} (58%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/{RestCreateQueryGroupAction.java => RestUpdateWorkloadGroupAction.java} (58%) rename plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/{QueryGroupPersistenceService.java => WorkloadGroupPersistenceService.java} (50%) rename plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/{QueryGroupTestUtils.java => WorkloadGroupTestUtils.java} (62%) rename plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/{CreateQueryGroupRequestTests.java => CreateWorkloadGroupRequestTests.java} (50%) rename plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/{CreateQueryGroupResponseTests.java => CreateWorkloadGroupResponseTests.java} (60%) rename plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/{DeleteQueryGroupRequestTests.java => DeleteWorkloadGroupRequestTests.java} (65%) rename plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/{GetQueryGroupRequestTests.java => GetWorkloadGroupRequestTests.java} (62%) rename plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/{GetQueryGroupResponseTests.java => GetWorkloadGroupResponseTests.java} (51%) delete mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/QueryGroupActionTestUtils.java rename 
plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/{TransportDeleteQueryGroupActionTests.java => TransportDeleteWorkloadGroupActionTests.java} (75%) rename plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/{TransportGetQueryGroupActionTests.java => TransportGetWorkloadGroupActionTests.java} (54%) rename plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/{UpdateQueryGroupRequestTests.java => UpdateWorkloadGroupRequestTests.java} (54%) rename plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/{UpdateQueryGroupResponseTests.java => UpdateWorkloadGroupResponseTests.java} (59%) create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/WorkloadGroupActionTestUtils.java rename plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/{RestDeleteQueryGroupActionTests.java => RestDeleteWorkloadGroupActionTests.java} (69%) delete mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceServiceTests.java rename plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/{create_query_group_context.json => create_workload_group_context.json} (61%) rename plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/{delete_query_group_context.json => delete_workload_group_context.json} (67%) rename plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/{get_query_group_context.json => get_workload_group_context.json} (65%) rename plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/{update_query_group_context.json => update_workload_group_context.json} (59%) rename plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/{10_query_group.yml => 10_workload_group.yml} (76%) rename server/src/main/java/org/opensearch/cluster/metadata/{QueryGroup.java => WorkloadGroup.java} (59%) rename server/src/main/java/org/opensearch/cluster/metadata/{QueryGroupMetadata.java => WorkloadGroupMetadata.java} (58%) rename server/src/main/java/org/opensearch/wlm/{MutableQueryGroupFragment.java => MutableWorkloadGroupFragment.java} (93%) delete mode 100644 server/src/main/java/org/opensearch/wlm/QueryGroupsStateAccessor.java rename server/src/main/java/org/opensearch/wlm/{QueryGroupLevelResourceUsageView.java => WorkloadGroupLevelResourceUsageView.java} (59%) rename server/src/main/java/org/opensearch/wlm/{QueryGroupService.java => WorkloadGroupService.java} (51%) rename server/src/main/java/org/opensearch/wlm/{QueryGroupTask.java => WorkloadGroupTask.java} (54%) rename server/src/main/java/org/opensearch/wlm/{QueryGroupThreadContextStatePropagator.java => WorkloadGroupThreadContextStatePropagator.java} (80%) create mode 100644 server/src/main/java/org/opensearch/wlm/WorkloadGroupsStateAccessor.java delete mode 100644 server/src/main/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationService.java create mode 100644 server/src/main/java/org/opensearch/wlm/cancellation/WorkloadGroupTaskCancellationService.java rename server/src/main/java/org/opensearch/wlm/listeners/{QueryGroupRequestOperationListener.java => WorkloadGroupRequestOperationListener.java} (50%) rename server/src/main/java/org/opensearch/wlm/stats/{QueryGroupState.java => WorkloadGroupState.java} (97%) rename 
server/src/main/java/org/opensearch/wlm/stats/{QueryGroupStats.java => WorkloadGroupStats.java} (77%) delete mode 100644 server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java create mode 100644 server/src/main/java/org/opensearch/wlm/tracker/WorkloadGroupResourceUsageTrackerService.java delete mode 100644 server/src/test/java/org/opensearch/cluster/metadata/QueryGroupTests.java rename server/src/test/java/org/opensearch/cluster/metadata/{QueryGroupMetadataTests.java => WorkloadGroupMetadataTests.java} (59%) create mode 100644 server/src/test/java/org/opensearch/cluster/metadata/WorkloadGroupTests.java rename server/src/test/java/org/opensearch/wlm/{MutableQueryGroupFragmentTests.java => MutableWorkloadGroupFragmentTests.java} (56%) delete mode 100644 server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java delete mode 100644 server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java delete mode 100644 server/src/test/java/org/opensearch/wlm/QueryGroupThreadContextStatePropagatorTests.java rename server/src/test/java/org/opensearch/wlm/{QueryGroupLevelResourceUsageViewTests.java => WorkloadGroupLevelResourceUsageViewTests.java} (71%) create mode 100644 server/src/test/java/org/opensearch/wlm/WorkloadGroupServiceTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/WorkloadGroupTaskTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/WorkloadGroupThreadContextStatePropagatorTests.java delete mode 100644 server/src/test/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationServiceTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/cancellation/WorkloadGroupTaskCancellationServiceTests.java rename server/src/test/java/org/opensearch/wlm/listeners/{QueryGroupRequestOperationListenerTests.java => WorkloadGroupRequestOperationListenerTests.java} (53%) delete mode 100644 server/src/test/java/org/opensearch/wlm/stats/QueryGroupStateTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/stats/WorkloadGroupStateTests.java rename server/src/test/java/org/opensearch/wlm/stats/{QueryGroupStatsTests.java => WorkloadGroupStatsTests.java} (64%) rename server/src/test/java/org/opensearch/wlm/tracker/{QueryGroupTaskResourceTrackingTests.java => WorkloadGroupTaskResourceTrackingTests.java} (61%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 57ba38c19aa0d..c7e24a16d5be7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce 512 byte limit to search and ingest pipeline IDs ([#17786](https://github.com/opensearch-project/OpenSearch/pull/17786)) - Avoid skewed segment replication lag metric ([#17831](https://github.com/opensearch-project/OpenSearch/pull/17831)) - Increase the default segment counter step size when replica promoting ([#17568](https://github.com/opensearch-project/OpenSearch/pull/17568)) +- [WLM] Rename QueryGroup to WorkloadGroup ([#17901](https://github.com/opensearch-project/OpenSearch/pull/17901)) ### Dependencies - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607), [#17669](https://github.com/opensearch-project/OpenSearch/pull/17669)) diff --git a/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java b/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java index 7e1d61e57b6f7..00d3e901588b1 100644 
--- a/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java
+++ b/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java
@@ -19,31 +19,31 @@ public class WorkloadManagementRestIT extends OpenSearchRestTestCase {
public void testCreate() throws Exception {
- Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics", "enforced", 0.4, 0.2));
+ Response response = performOperation("PUT", "_wlm/workload_group", getCreateJson("analytics", "enforced", 0.4, 0.2));
assertEquals(response.getStatusLine().getStatusCode(), 200);
- performOperation("DELETE", "_wlm/query_group/analytics", null);
+ performOperation("DELETE", "_wlm/workload_group/analytics", null);
}
public void testMultipleCreate() throws Exception {
- Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics2", "enforced", 0.4, 0.2));
+ Response response = performOperation("PUT", "_wlm/workload_group", getCreateJson("analytics2", "enforced", 0.4, 0.2));
assertEquals(response.getStatusLine().getStatusCode(), 200);
- Response response2 = performOperation("PUT", "_wlm/query_group", getCreateJson("users", "soft", 0.2, 0.1));
+ Response response2 = performOperation("PUT", "_wlm/workload_group", getCreateJson("users", "soft", 0.2, 0.1));
assertEquals(response2.getStatusLine().getStatusCode(), 200);
assertThrows(
ResponseException.class,
- () -> performOperation("PUT", "_wlm/query_group", getCreateJson("users2", "soft", 0.41, 0.71))
+ () -> performOperation("PUT", "_wlm/workload_group", getCreateJson("users2", "soft", 0.41, 0.71))
);
- performOperation("DELETE", "_wlm/query_group/analytics2", null);
- performOperation("DELETE", "_wlm/query_group/users", null);
+ performOperation("DELETE", "_wlm/workload_group/analytics2", null);
+ performOperation("DELETE", "_wlm/workload_group/users", null);
}
public void testGet() throws Exception {
- Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics3", "enforced", 0.4, 0.2));
+ Response response = performOperation("PUT", "_wlm/workload_group", getCreateJson("analytics3", "enforced", 0.4, 0.2));
assertEquals(response.getStatusLine().getStatusCode(), 200);
- Response response2 = performOperation("GET", "_wlm/query_group/analytics3", null);
+ Response response2 = performOperation("GET", "_wlm/workload_group/analytics3", null);
assertEquals(response2.getStatusLine().getStatusCode(), 200);
String responseBody2 = EntityUtils.toString(response2.getEntity());
assertTrue(responseBody2.contains("\"name\":\"analytics3\""));
@@ -51,26 +51,26 @@ public void testGet() throws Exception {
assertTrue(responseBody2.contains("\"cpu\":0.4"));
assertTrue(responseBody2.contains("\"memory\":0.2"));
"_wlm/query_group/analytics4", null); + Response response2 = performOperation("DELETE", "_wlm/workload_group/analytics4", null); assertEquals(response2.getStatusLine().getStatusCode(), 200); assertTrue(EntityUtils.toString(response2.getEntity()).contains("\"acknowledged\":true")); - assertThrows(ResponseException.class, () -> performOperation("DELETE", "_wlm/query_group/analytics99", null)); + assertThrows(ResponseException.class, () -> performOperation("DELETE", "_wlm/workload_group/analytics99", null)); } public void testUpdate() throws Exception { - Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics5", "enforced", 0.4, 0.2)); + Response response = performOperation("PUT", "_wlm/workload_group", getCreateJson("analytics5", "enforced", 0.4, 0.2)); assertEquals(response.getStatusLine().getStatusCode(), 200); - Response response2 = performOperation("PUT", "_wlm/query_group/analytics5", getUpdateJson("monitor", 0.41, 0.21)); + Response response2 = performOperation("PUT", "_wlm/workload_group/analytics5", getUpdateJson("monitor", 0.41, 0.21)); assertEquals(response2.getStatusLine().getStatusCode(), 200); String responseBody2 = EntityUtils.toString(response2.getEntity()); assertTrue(responseBody2.contains("\"name\":\"analytics5\"")); @@ -84,22 +84,22 @@ public void testUpdate() throws Exception { + " \"memory\" : -0.1\n" + " }\n" + "}'"; - assertThrows(ResponseException.class, () -> performOperation("PUT", "_wlm/query_group/analytics5", json)); + assertThrows(ResponseException.class, () -> performOperation("PUT", "_wlm/workload_group/analytics5", json)); assertThrows( ResponseException.class, - () -> performOperation("PUT", "_wlm/query_group/analytics98", getUpdateJson("monitor", 0.43, 0.23)) + () -> performOperation("PUT", "_wlm/workload_group/analytics98", getUpdateJson("monitor", 0.43, 0.23)) ); - performOperation("DELETE", "_wlm/query_group/analytics5", null); + performOperation("DELETE", "_wlm/workload_group/analytics5", null); } public void testCRUD() throws Exception { - Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics6", "enforced", 0.4, 0.2)); + Response response = performOperation("PUT", "_wlm/workload_group", getCreateJson("analytics6", "enforced", 0.4, 0.2)); assertEquals(response.getStatusLine().getStatusCode(), 200); - Response response1 = performOperation("PUT", "_wlm/query_group/analytics6", getUpdateJson("monitor", 0.41, 0.21)); + Response response1 = performOperation("PUT", "_wlm/workload_group/analytics6", getUpdateJson("monitor", 0.41, 0.21)); assertEquals(response1.getStatusLine().getStatusCode(), 200); - Response response2 = performOperation("GET", "_wlm/query_group/analytics6", null); + Response response2 = performOperation("GET", "_wlm/workload_group/analytics6", null); assertEquals(response2.getStatusLine().getStatusCode(), 200); String responseBody2 = EntityUtils.toString(response2.getEntity()); assertTrue(responseBody2.contains("\"name\":\"analytics6\"")); @@ -109,24 +109,24 @@ public void testCRUD() throws Exception { assertThrows( ResponseException.class, - () -> performOperation("PUT", "_wlm/query_group", getCreateJson("users3", "monitor", 0.6, 0.8)) + () -> performOperation("PUT", "_wlm/workload_group", getCreateJson("users3", "monitor", 0.6, 0.8)) ); - Response response4 = performOperation("PUT", "_wlm/query_group", getCreateJson("users3", "monitor", 0.59, 0.79)); + Response response4 = performOperation("PUT", "_wlm/workload_group", getCreateJson("users3", "monitor", 0.59, 0.79)); 
assertEquals(response4.getStatusLine().getStatusCode(), 200); - Response response5 = performOperation("DELETE", "_wlm/query_group/analytics6", null); + Response response5 = performOperation("DELETE", "_wlm/workload_group/analytics6", null); assertEquals(response5.getStatusLine().getStatusCode(), 200); String responseBody5 = EntityUtils.toString(response5.getEntity()); assertTrue(responseBody5.contains("\"acknowledged\":true")); - Response response6 = performOperation("GET", "_wlm/query_group", null); + Response response6 = performOperation("GET", "_wlm/workload_group", null); assertEquals(response6.getStatusLine().getStatusCode(), 200); String responseBody6 = EntityUtils.toString(response6.getEntity()); - assertTrue(responseBody6.contains("\"query_groups\"")); + assertTrue(responseBody6.contains("\"workload_groups\"")); assertTrue(responseBody6.contains("\"users3\"")); assertFalse(responseBody6.contains("\"analytics6\"")); - performOperation("DELETE", "_wlm/query_group/users3", null); + performOperation("DELETE", "_wlm/workload_group/users3", null); } static String getCreateJson(String name, String resiliencyMode, double cpu, double memory) { diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java index c86490552f2f2..85c8cc8409c0f 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java @@ -18,19 +18,19 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; import org.opensearch.core.action.ActionResponse; -import org.opensearch.plugin.wlm.action.CreateQueryGroupAction; -import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; -import org.opensearch.plugin.wlm.action.GetQueryGroupAction; -import org.opensearch.plugin.wlm.action.TransportCreateQueryGroupAction; -import org.opensearch.plugin.wlm.action.TransportDeleteQueryGroupAction; -import org.opensearch.plugin.wlm.action.TransportGetQueryGroupAction; -import org.opensearch.plugin.wlm.action.TransportUpdateQueryGroupAction; -import org.opensearch.plugin.wlm.action.UpdateQueryGroupAction; -import org.opensearch.plugin.wlm.rest.RestCreateQueryGroupAction; -import org.opensearch.plugin.wlm.rest.RestDeleteQueryGroupAction; -import org.opensearch.plugin.wlm.rest.RestGetQueryGroupAction; -import org.opensearch.plugin.wlm.rest.RestUpdateQueryGroupAction; -import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.plugin.wlm.action.CreateWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.GetWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.TransportCreateWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.TransportDeleteWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.TransportGetWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.TransportUpdateWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupAction; +import org.opensearch.plugin.wlm.rest.RestCreateWorkloadGroupAction; +import org.opensearch.plugin.wlm.rest.RestDeleteWorkloadGroupAction; +import org.opensearch.plugin.wlm.rest.RestGetWorkloadGroupAction; +import org.opensearch.plugin.wlm.rest.RestUpdateWorkloadGroupAction; +import 
org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.rest.RestController; @@ -53,10 +53,10 @@ public WorkloadManagementPlugin() {} @Override public List> getActions() { return List.of( - new ActionPlugin.ActionHandler<>(CreateQueryGroupAction.INSTANCE, TransportCreateQueryGroupAction.class), - new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class), - new ActionPlugin.ActionHandler<>(DeleteQueryGroupAction.INSTANCE, TransportDeleteQueryGroupAction.class), - new ActionPlugin.ActionHandler<>(UpdateQueryGroupAction.INSTANCE, TransportUpdateQueryGroupAction.class) + new ActionPlugin.ActionHandler<>(CreateWorkloadGroupAction.INSTANCE, TransportCreateWorkloadGroupAction.class), + new ActionPlugin.ActionHandler<>(GetWorkloadGroupAction.INSTANCE, TransportGetWorkloadGroupAction.class), + new ActionPlugin.ActionHandler<>(DeleteWorkloadGroupAction.INSTANCE, TransportDeleteWorkloadGroupAction.class), + new ActionPlugin.ActionHandler<>(UpdateWorkloadGroupAction.INSTANCE, TransportUpdateWorkloadGroupAction.class) ); } @@ -71,16 +71,16 @@ public List getRestHandlers( Supplier nodesInCluster ) { return List.of( - new RestCreateQueryGroupAction(), - new RestGetQueryGroupAction(), - new RestDeleteQueryGroupAction(), - new RestUpdateQueryGroupAction() + new RestCreateWorkloadGroupAction(), + new RestGetWorkloadGroupAction(), + new RestDeleteWorkloadGroupAction(), + new RestUpdateWorkloadGroupAction() ); } @Override public List> getSettings() { - return List.of(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT); + return List.of(WorkloadGroupPersistenceService.MAX_QUERY_GROUP_COUNT); } @Override diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java index b7c7805639eb2..bb0f4c7e90122 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java @@ -10,7 +10,7 @@ import org.opensearch.common.inject.AbstractModule; import org.opensearch.common.inject.Singleton; -import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService; /** * Guice Module to manage WorkloadManagement related objects @@ -24,8 +24,8 @@ public WorkloadManagementPluginModule() {} @Override protected void configure() { - // Bind QueryGroupPersistenceService as a singleton to ensure a single instance is used, + // Bind WorkloadGroupPersistenceService as a singleton to ensure a single instance is used, // preventing multiple throttling key registrations in the constructor. 
- bind(QueryGroupPersistenceService.class).in(Singleton.class); + bind(WorkloadGroupPersistenceService.class).in(Singleton.class); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupAction.java similarity index 51% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupAction.java index ff472f206131c..ca9784ebc7e4b 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupAction.java @@ -11,26 +11,26 @@ import org.opensearch.action.ActionType; /** - * Transport action to update QueryGroup + * Transport action to create WorkloadGroup * * @opensearch.experimental */ -public class UpdateQueryGroupAction extends ActionType { +public class CreateWorkloadGroupAction extends ActionType { /** - * An instance of UpdateQueryGroupAction + * An instance of CreateWorkloadGroupAction */ - public static final UpdateQueryGroupAction INSTANCE = new UpdateQueryGroupAction(); + public static final CreateWorkloadGroupAction INSTANCE = new CreateWorkloadGroupAction(); /** - * Name for UpdateQueryGroupAction + * Name for CreateWorkloadGroupAction */ - public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_update"; + public static final String NAME = "cluster:admin/opensearch/wlm/workload_group/_create"; /** * Default constructor */ - private UpdateQueryGroupAction() { - super(NAME, UpdateQueryGroupResponse::new); + private CreateWorkloadGroupAction() { + super(NAME, CreateWorkloadGroupResponse::new); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupRequest.java similarity index 53% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupRequest.java index 1ce04faa7ccc1..ad932667b25e8 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupRequest.java @@ -10,7 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.common.UUIDs; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -20,7 +20,7 @@ import java.io.IOException; /** - * A request for create QueryGroup + * A request for create WorkloadGroup * User input schema: * { * "name": "analytics", @@ -33,33 +33,33 @@ * * @opensearch.experimental */ -public class CreateQueryGroupRequest extends ClusterManagerNodeRequest { - private final QueryGroup queryGroup; +public class CreateWorkloadGroupRequest extends ClusterManagerNodeRequest { + private final WorkloadGroup workloadGroup; /** - * 
Constructor for CreateQueryGroupRequest - * @param queryGroup - A {@link QueryGroup} object + * Constructor for CreateWorkloadGroupRequest + * @param workloadGroup - A {@link WorkloadGroup} object */ - CreateQueryGroupRequest(QueryGroup queryGroup) { - this.queryGroup = queryGroup; + CreateWorkloadGroupRequest(WorkloadGroup workloadGroup) { + this.workloadGroup = workloadGroup; } /** - * Constructor for CreateQueryGroupRequest + * Constructor for CreateWorkloadGroupRequest * @param in - A {@link StreamInput} object */ - CreateQueryGroupRequest(StreamInput in) throws IOException { + CreateWorkloadGroupRequest(StreamInput in) throws IOException { super(in); - queryGroup = new QueryGroup(in); + workloadGroup = new WorkloadGroup(in); } /** - * Generate a CreateQueryGroupRequest from XContent + * Generate a CreateWorkloadGroupRequest from XContent * @param parser - A {@link XContentParser} object */ - public static CreateQueryGroupRequest fromXContent(XContentParser parser) throws IOException { - QueryGroup.Builder builder = QueryGroup.Builder.fromXContent(parser); - return new CreateQueryGroupRequest(builder._id(UUIDs.randomBase64UUID()).updatedAt(Instant.now().getMillis()).build()); + public static CreateWorkloadGroupRequest fromXContent(XContentParser parser) throws IOException { + WorkloadGroup.Builder builder = WorkloadGroup.Builder.fromXContent(parser); + return new CreateWorkloadGroupRequest(builder._id(UUIDs.randomBase64UUID()).updatedAt(Instant.now().getMillis()).build()); } @Override @@ -70,13 +70,13 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - queryGroup.writeTo(out); + workloadGroup.writeTo(out); } /** - * QueryGroup getter + * WorkloadGroup getter */ - public QueryGroup getQueryGroup() { - return queryGroup; + public WorkloadGroup getWorkloadGroup() { + return workloadGroup; } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupResponse.java similarity index 59% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupResponse.java index 9a2a8178c0a29..b33214e042398 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupResponse.java @@ -8,7 +8,7 @@ package org.opensearch.plugin.wlm.action; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -20,49 +20,49 @@ import java.io.IOException; /** - * Response for the create API for QueryGroup + * Response for the create API for WorkloadGroup * * @opensearch.experimental */ -public class CreateQueryGroupResponse extends ActionResponse implements ToXContent, ToXContentObject { - private final QueryGroup queryGroup; +public class CreateWorkloadGroupResponse extends ActionResponse implements ToXContent, ToXContentObject { + private final WorkloadGroup workloadGroup; private final RestStatus restStatus; /** - * Constructor for 
CreateQueryGroupResponse - * @param queryGroup - The QueryGroup to be included in the response + * Constructor for CreateWorkloadGroupResponse + * @param workloadGroup - The WorkloadGroup to be included in the response * @param restStatus - The restStatus for the response */ - public CreateQueryGroupResponse(final QueryGroup queryGroup, RestStatus restStatus) { - this.queryGroup = queryGroup; + public CreateWorkloadGroupResponse(final WorkloadGroup workloadGroup, RestStatus restStatus) { + this.workloadGroup = workloadGroup; this.restStatus = restStatus; } /** - * Constructor for CreateQueryGroupResponse + * Constructor for CreateWorkloadGroupResponse * @param in - A {@link StreamInput} object */ - public CreateQueryGroupResponse(StreamInput in) throws IOException { - queryGroup = new QueryGroup(in); + public CreateWorkloadGroupResponse(StreamInput in) throws IOException { + workloadGroup = new WorkloadGroup(in); restStatus = RestStatus.readFrom(in); } @Override public void writeTo(StreamOutput out) throws IOException { - queryGroup.writeTo(out); + workloadGroup.writeTo(out); RestStatus.writeTo(out, restStatus); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return queryGroup.toXContent(builder, params); + return workloadGroup.toXContent(builder, params); } /** - * queryGroup getter + * workloadGroup getter */ - public QueryGroup getQueryGroup() { - return queryGroup; + public WorkloadGroup getWorkloadGroup() { + return workloadGroup; } /** diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupAction.java similarity index 61% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupAction.java index b638dbd61ca1a..39b47d69776f4 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupAction.java @@ -12,27 +12,27 @@ import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** - * Transport action for delete QueryGroup + * Transport action for delete WorkloadGroup * * @opensearch.experimental */ -public class DeleteQueryGroupAction extends ActionType { +public class DeleteWorkloadGroupAction extends ActionType { /** /** - * An instance of DeleteQueryGroupAction + * An instance of DeleteWorkloadGroupAction */ - public static final DeleteQueryGroupAction INSTANCE = new DeleteQueryGroupAction(); + public static final DeleteWorkloadGroupAction INSTANCE = new DeleteWorkloadGroupAction(); /** - * Name for DeleteQueryGroupAction + * Name for DeleteWorkloadGroupAction */ - public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_delete"; + public static final String NAME = "cluster:admin/opensearch/wlm/workload_group/_delete"; /** * Default constructor */ - private DeleteQueryGroupAction() { + private DeleteWorkloadGroupAction() { super(NAME, AcknowledgedResponse::new); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupRequest.java similarity index 75% rename from 
plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupRequest.java index e798c8e137062..940a3815b1662 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupRequest.java @@ -16,26 +16,26 @@ import java.io.IOException; /** - * Request for delete QueryGroup + * Request for delete WorkloadGroup * * @opensearch.experimental */ -public class DeleteQueryGroupRequest extends AcknowledgedRequest { +public class DeleteWorkloadGroupRequest extends AcknowledgedRequest { private final String name; /** - * Default constructor for DeleteQueryGroupRequest - * @param name - name for the QueryGroup to get + * Default constructor for DeleteWorkloadGroupRequest + * @param name - name for the WorkloadGroup to get */ - public DeleteQueryGroupRequest(String name) { + public DeleteWorkloadGroupRequest(String name) { this.name = name; } /** - * Constructor for DeleteQueryGroupRequest + * Constructor for DeleteWorkloadGroupRequest * @param in - A {@link StreamInput} object */ - public DeleteQueryGroupRequest(StreamInput in) throws IOException { + public DeleteWorkloadGroupRequest(StreamInput in) throws IOException { super(in); name = in.readOptionalString(); } @@ -44,7 +44,7 @@ public DeleteQueryGroupRequest(StreamInput in) throws IOException { public ActionRequestValidationException validate() { if (name == null) { ActionRequestValidationException actionRequestValidationException = new ActionRequestValidationException(); - actionRequestValidationException.addValidationError("QueryGroup name is missing"); + actionRequestValidationException.addValidationError("WorkloadGroup name is missing"); return actionRequestValidationException; } return null; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupAction.java similarity index 53% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupAction.java index 14cb8cfcd125a..ee1b40a2f9bbc 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupAction.java @@ -11,26 +11,26 @@ import org.opensearch.action.ActionType; /** - * Transport action to create QueryGroup + * Transport action to get WorkloadGroup * * @opensearch.experimental */ -public class CreateQueryGroupAction extends ActionType { +public class GetWorkloadGroupAction extends ActionType { /** - * An instance of CreateQueryGroupAction + * An instance of GetWorkloadGroupAction */ - public static final CreateQueryGroupAction INSTANCE = new CreateQueryGroupAction(); + public static final GetWorkloadGroupAction INSTANCE = new GetWorkloadGroupAction(); /** - * Name for CreateQueryGroupAction + * Name for GetWorkloadGroupAction */ - public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_create"; + public static final String NAME = "cluster:admin/opensearch/wlm/workload_group/_get"; /** * Default constructor */ - private 
CreateQueryGroupAction() { - super(NAME, CreateQueryGroupResponse::new); + private GetWorkloadGroupAction() { + super(NAME, GetWorkloadGroupResponse::new); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupRequest.java similarity index 69% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequest.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupRequest.java index 0524c615a84e7..4b8a5f85fd236 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequest.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupRequest.java @@ -10,33 +10,33 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import java.io.IOException; /** - * Request for get QueryGroup + * Request for get WorkloadGroup * * @opensearch.experimental */ -public class GetQueryGroupRequest extends ClusterManagerNodeReadRequest { +public class GetWorkloadGroupRequest extends ClusterManagerNodeReadRequest { final String name; /** - * Default constructor for GetQueryGroupRequest - * @param name - name for the QueryGroup to get + * Default constructor for GetWorkloadGroupRequest + * @param name - name for the WorkloadGroup to get */ - public GetQueryGroupRequest(String name) { + public GetWorkloadGroupRequest(String name) { this.name = name; } /** - * Constructor for GetQueryGroupRequest + * Constructor for GetWorkloadGroupRequest * @param in - A {@link StreamInput} object */ - public GetQueryGroupRequest(StreamInput in) throws IOException { + public GetWorkloadGroupRequest(StreamInput in) throws IOException { super(in); name = in.readOptionalString(); } @@ -44,7 +44,7 @@ public GetQueryGroupRequest(StreamInput in) throws IOException { @Override public ActionRequestValidationException validate() { if (name != null) { - QueryGroup.validateName(name); + WorkloadGroup.validateName(name); } return null; } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponse.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupResponse.java similarity index 61% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponse.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupResponse.java index 547c501e6a28e..ab8f773088a37 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponse.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupResponse.java @@ -8,7 +8,7 @@ package org.opensearch.plugin.wlm.action; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -21,44 +21,44 @@ 
import java.util.Collection; /** - * Response for the get API for QueryGroup + * Response for the get API for WorkloadGroup * * @opensearch.experimental */ -public class GetQueryGroupResponse extends ActionResponse implements ToXContent, ToXContentObject { - private final Collection queryGroups; +public class GetWorkloadGroupResponse extends ActionResponse implements ToXContent, ToXContentObject { + private final Collection workloadGroups; private final RestStatus restStatus; /** - * Constructor for GetQueryGroupResponse - * @param queryGroups - The QueryGroup list to be fetched + * Constructor for GetWorkloadGroupResponse + * @param workloadGroups - The WorkloadGroup list to be fetched * @param restStatus - The rest status of the request */ - public GetQueryGroupResponse(final Collection queryGroups, RestStatus restStatus) { - this.queryGroups = queryGroups; + public GetWorkloadGroupResponse(final Collection workloadGroups, RestStatus restStatus) { + this.workloadGroups = workloadGroups; this.restStatus = restStatus; } /** - * Constructor for GetQueryGroupResponse + * Constructor for GetWorkloadGroupResponse * @param in - A {@link StreamInput} object */ - public GetQueryGroupResponse(StreamInput in) throws IOException { - this.queryGroups = in.readList(QueryGroup::new); + public GetWorkloadGroupResponse(StreamInput in) throws IOException { + this.workloadGroups = in.readList(WorkloadGroup::new); restStatus = RestStatus.readFrom(in); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeCollection(queryGroups); + out.writeCollection(workloadGroups); RestStatus.writeTo(out, restStatus); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startArray("query_groups"); - for (QueryGroup group : queryGroups) { + builder.startArray("workload_groups"); + for (WorkloadGroup group : workloadGroups) { group.toXContent(builder, params); } builder.endArray(); @@ -67,10 +67,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } /** - * queryGroups getter + * workloadGroups getter */ - public Collection getQueryGroups() { - return queryGroups; + public Collection getWorkloadGroups() { + return workloadGroups; } /** diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportUpdateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateWorkloadGroupAction.java similarity index 59% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportUpdateQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateWorkloadGroupAction.java index 09a0da7086b36..2039f1cb590ff 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportUpdateQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateWorkloadGroupAction.java @@ -17,7 +17,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -26,50 +26,52 @@ import static 
org.opensearch.threadpool.ThreadPool.Names.SAME; /** - * Transport action to update QueryGroup + * Transport action to create WorkloadGroup * * @opensearch.experimental */ -public class TransportUpdateQueryGroupAction extends TransportClusterManagerNodeAction { +public class TransportCreateWorkloadGroupAction extends TransportClusterManagerNodeAction< + CreateWorkloadGroupRequest, + CreateWorkloadGroupResponse> { - private final QueryGroupPersistenceService queryGroupPersistenceService; + private final WorkloadGroupPersistenceService workloadGroupPersistenceService; /** - * Constructor for TransportUpdateQueryGroupAction + * Constructor for TransportCreateWorkloadGroupAction * * @param threadPool - {@link ThreadPool} object * @param transportService - a {@link TransportService} object * @param actionFilters - a {@link ActionFilters} object * @param indexNameExpressionResolver - {@link IndexNameExpressionResolver} object - * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object + * @param workloadGroupPersistenceService - a {@link WorkloadGroupPersistenceService} object */ @Inject - public TransportUpdateQueryGroupAction( + public TransportCreateWorkloadGroupAction( ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - QueryGroupPersistenceService queryGroupPersistenceService + WorkloadGroupPersistenceService workloadGroupPersistenceService ) { super( - UpdateQueryGroupAction.NAME, + CreateWorkloadGroupAction.NAME, transportService, - queryGroupPersistenceService.getClusterService(), + workloadGroupPersistenceService.getClusterService(), threadPool, actionFilters, - UpdateQueryGroupRequest::new, + CreateWorkloadGroupRequest::new, indexNameExpressionResolver ); - this.queryGroupPersistenceService = queryGroupPersistenceService; + this.workloadGroupPersistenceService = workloadGroupPersistenceService; } @Override protected void clusterManagerOperation( - UpdateQueryGroupRequest request, + CreateWorkloadGroupRequest request, ClusterState clusterState, - ActionListener listener + ActionListener listener ) { - queryGroupPersistenceService.updateInClusterStateMetadata(request, listener); + workloadGroupPersistenceService.persistInClusterStateMetadata(request.getWorkloadGroup(), listener); } @Override @@ -78,12 +80,13 @@ protected String executor() { } @Override - protected UpdateQueryGroupResponse read(StreamInput in) throws IOException { - return new UpdateQueryGroupResponse(in); + protected CreateWorkloadGroupResponse read(StreamInput in) throws IOException { + return new CreateWorkloadGroupResponse(in); } @Override - protected ClusterBlockException checkBlock(UpdateQueryGroupRequest request, ClusterState state) { + protected ClusterBlockException checkBlock(CreateWorkloadGroupRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } + } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteWorkloadGroupAction.java similarity index 69% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteWorkloadGroupAction.java index dd37f9df399ce..2bfbadba4d51d 100644 --- 
a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteWorkloadGroupAction.java @@ -19,59 +19,61 @@ import org.opensearch.common.inject.Inject; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; /** - * Transport action for delete QueryGroup + * Transport action for delete WorkloadGroup * * @opensearch.experimental */ -public class TransportDeleteQueryGroupAction extends TransportClusterManagerNodeAction { +public class TransportDeleteWorkloadGroupAction extends TransportClusterManagerNodeAction< + DeleteWorkloadGroupRequest, + AcknowledgedResponse> { - private final QueryGroupPersistenceService queryGroupPersistenceService; + private final WorkloadGroupPersistenceService workloadGroupPersistenceService; /** - * Constructor for TransportDeleteQueryGroupAction + * Constructor for TransportDeleteWorkloadGroupAction * * @param clusterService - a {@link ClusterService} object * @param transportService - a {@link TransportService} object * @param actionFilters - a {@link ActionFilters} object * @param threadPool - a {@link ThreadPool} object * @param indexNameExpressionResolver - a {@link IndexNameExpressionResolver} object - * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object + * @param workloadGroupPersistenceService - a {@link WorkloadGroupPersistenceService} object */ @Inject - public TransportDeleteQueryGroupAction( + public TransportDeleteWorkloadGroupAction( ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool, IndexNameExpressionResolver indexNameExpressionResolver, - QueryGroupPersistenceService queryGroupPersistenceService + WorkloadGroupPersistenceService workloadGroupPersistenceService ) { super( - DeleteQueryGroupAction.NAME, + DeleteWorkloadGroupAction.NAME, transportService, clusterService, threadPool, actionFilters, - DeleteQueryGroupRequest::new, + DeleteWorkloadGroupRequest::new, indexNameExpressionResolver ); - this.queryGroupPersistenceService = queryGroupPersistenceService; + this.workloadGroupPersistenceService = workloadGroupPersistenceService; } @Override protected void clusterManagerOperation( - DeleteQueryGroupRequest request, + DeleteWorkloadGroupRequest request, ClusterState state, ActionListener listener ) throws Exception { - queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + workloadGroupPersistenceService.deleteInClusterStateMetadata(request, listener); } @Override @@ -85,7 +87,7 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected ClusterBlockException checkBlock(DeleteQueryGroupRequest request, ClusterState state) { + protected ClusterBlockException checkBlock(DeleteWorkloadGroupRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupAction.java 
b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetWorkloadGroupAction.java similarity index 65% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetWorkloadGroupAction.java index 51bb21b255511..bb2fbab047343 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetWorkloadGroupAction.java @@ -17,13 +17,13 @@ import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; -import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService; import org.opensearch.search.pipeline.SearchPipelineService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -32,15 +32,17 @@ import java.util.Collection; /** - * Transport action to get QueryGroup + * Transport action to get WorkloadGroup * * @opensearch.experimental */ -public class TransportGetQueryGroupAction extends TransportClusterManagerNodeReadAction { +public class TransportGetWorkloadGroupAction extends TransportClusterManagerNodeReadAction< + GetWorkloadGroupRequest, + GetWorkloadGroupResponse> { private static final Logger logger = LogManager.getLogger(SearchPipelineService.class); /** - * Constructor for TransportGetQueryGroupAction + * Constructor for TransportGetWorkloadGroupAction * * @param clusterService - a {@link ClusterService} object * @param transportService - a {@link TransportService} object @@ -49,7 +51,7 @@ public class TransportGetQueryGroupAction extends TransportClusterManagerNodeRea * @param indexNameExpressionResolver - a {@link IndexNameExpressionResolver} object */ @Inject - public TransportGetQueryGroupAction( + public TransportGetWorkloadGroupAction( ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, @@ -57,12 +59,12 @@ public TransportGetQueryGroupAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - GetQueryGroupAction.NAME, + GetWorkloadGroupAction.NAME, transportService, clusterService, threadPool, actionFilters, - GetQueryGroupRequest::new, + GetWorkloadGroupRequest::new, indexNameExpressionResolver, true ); @@ -74,25 +76,28 @@ protected String executor() { } @Override - protected GetQueryGroupResponse read(StreamInput in) throws IOException { - return new GetQueryGroupResponse(in); + protected GetWorkloadGroupResponse read(StreamInput in) throws IOException { + return new GetWorkloadGroupResponse(in); } @Override - protected ClusterBlockException checkBlock(GetQueryGroupRequest request, ClusterState state) { + protected ClusterBlockException checkBlock(GetWorkloadGroupRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } @Override - 
protected void clusterManagerOperation(GetQueryGroupRequest request, ClusterState state, ActionListener listener) - throws Exception { + protected void clusterManagerOperation( + GetWorkloadGroupRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { final String name = request.getName(); - final Collection resultGroups = QueryGroupPersistenceService.getFromClusterStateMetadata(name, state); + final Collection resultGroups = WorkloadGroupPersistenceService.getFromClusterStateMetadata(name, state); if (resultGroups.isEmpty() && name != null && !name.isEmpty()) { - logger.warn("No QueryGroup exists with the provided name: {}", name); - throw new ResourceNotFoundException("No QueryGroup exists with the provided name: " + name); + logger.warn("No WorkloadGroup exists with the provided name: {}", name); + throw new ResourceNotFoundException("No WorkloadGroup exists with the provided name: " + name); } - listener.onResponse(new GetQueryGroupResponse(resultGroups, RestStatus.OK)); + listener.onResponse(new GetWorkloadGroupResponse(resultGroups, RestStatus.OK)); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportUpdateWorkloadGroupAction.java similarity index 60% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportUpdateWorkloadGroupAction.java index dff9c429d63b0..ef639d44b4155 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportUpdateWorkloadGroupAction.java @@ -17,7 +17,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -26,50 +26,52 @@ import static org.opensearch.threadpool.ThreadPool.Names.SAME; /** - * Transport action to create QueryGroup + * Transport action to update WorkloadGroup * * @opensearch.experimental */ -public class TransportCreateQueryGroupAction extends TransportClusterManagerNodeAction { +public class TransportUpdateWorkloadGroupAction extends TransportClusterManagerNodeAction< + UpdateWorkloadGroupRequest, + UpdateWorkloadGroupResponse> { - private final QueryGroupPersistenceService queryGroupPersistenceService; + private final WorkloadGroupPersistenceService workloadGroupPersistenceService; /** - * Constructor for TransportCreateQueryGroupAction + * Constructor for TransportUpdateWorkloadGroupAction * * @param threadPool - {@link ThreadPool} object * @param transportService - a {@link TransportService} object * @param actionFilters - a {@link ActionFilters} object * @param indexNameExpressionResolver - {@link IndexNameExpressionResolver} object - * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object + * @param workloadGroupPersistenceService - a {@link WorkloadGroupPersistenceService} object */ @Inject - public TransportCreateQueryGroupAction( + public 
TransportUpdateWorkloadGroupAction( ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - QueryGroupPersistenceService queryGroupPersistenceService + WorkloadGroupPersistenceService workloadGroupPersistenceService ) { super( - CreateQueryGroupAction.NAME, + UpdateWorkloadGroupAction.NAME, transportService, - queryGroupPersistenceService.getClusterService(), + workloadGroupPersistenceService.getClusterService(), threadPool, actionFilters, - CreateQueryGroupRequest::new, + UpdateWorkloadGroupRequest::new, indexNameExpressionResolver ); - this.queryGroupPersistenceService = queryGroupPersistenceService; + this.workloadGroupPersistenceService = workloadGroupPersistenceService; } @Override protected void clusterManagerOperation( - CreateQueryGroupRequest request, + UpdateWorkloadGroupRequest request, ClusterState clusterState, - ActionListener listener + ActionListener listener ) { - queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener); + workloadGroupPersistenceService.updateInClusterStateMetadata(request, listener); } @Override @@ -78,13 +80,12 @@ protected String executor() { } @Override - protected CreateQueryGroupResponse read(StreamInput in) throws IOException { - return new CreateQueryGroupResponse(in); + protected UpdateWorkloadGroupResponse read(StreamInput in) throws IOException { + return new UpdateWorkloadGroupResponse(in); } @Override - protected ClusterBlockException checkBlock(CreateQueryGroupRequest request, ClusterState state) { + protected ClusterBlockException checkBlock(UpdateWorkloadGroupRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } - } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequest.java deleted file mode 100644 index 18af58289be13..0000000000000 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.wlm.action; - -import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; -import org.opensearch.cluster.metadata.QueryGroup; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.wlm.MutableQueryGroupFragment; - -import java.io.IOException; - -/** - * A request for update QueryGroup - * - * @opensearch.experimental - */ -public class UpdateQueryGroupRequest extends ClusterManagerNodeRequest { - private final String name; - private final MutableQueryGroupFragment mutableQueryGroupFragment; - - /** - * Constructor for UpdateQueryGroupRequest - * @param name - QueryGroup name for UpdateQueryGroupRequest - * @param mutableQueryGroupFragment - MutableQueryGroupFragment for UpdateQueryGroupRequest - */ - UpdateQueryGroupRequest(String name, MutableQueryGroupFragment mutableQueryGroupFragment) { - this.name = name; - this.mutableQueryGroupFragment = mutableQueryGroupFragment; - } - - /** - * Constructor for UpdateQueryGroupRequest - * @param in - A {@link StreamInput} object - */ - UpdateQueryGroupRequest(StreamInput in) throws IOException { - this(in.readString(), new MutableQueryGroupFragment(in)); - } - - /** - * Generate a UpdateQueryGroupRequest from XContent - * @param parser - A {@link XContentParser} object - * @param name - name of the QueryGroup to be updated - */ - public static UpdateQueryGroupRequest fromXContent(XContentParser parser, String name) throws IOException { - QueryGroup.Builder builder = QueryGroup.Builder.fromXContent(parser); - return new UpdateQueryGroupRequest(name, builder.getMutableQueryGroupFragment()); - } - - @Override - public ActionRequestValidationException validate() { - QueryGroup.validateName(name); - return null; - } - - /** - * name getter - */ - public String getName() { - return name; - } - - /** - * mutableQueryGroupFragment getter - */ - public MutableQueryGroupFragment getmMutableQueryGroupFragment() { - return mutableQueryGroupFragment; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - mutableQueryGroupFragment.writeTo(out); - } -} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupAction.java similarity index 51% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupAction.java index 0200185580f7d..b4f8e1ce90126 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupAction.java @@ -11,26 +11,26 @@ import org.opensearch.action.ActionType; /** - * Transport action to get QueryGroup + * Transport action to update WorkloadGroup * * @opensearch.experimental */ -public class GetQueryGroupAction extends ActionType { +public class UpdateWorkloadGroupAction extends ActionType { /** - * An instance of GetQueryGroupAction + * An instance of UpdateWorkloadGroupAction */ - public static final GetQueryGroupAction INSTANCE = new GetQueryGroupAction(); + public static final 
UpdateWorkloadGroupAction INSTANCE = new UpdateWorkloadGroupAction(); /** - * Name for GetQueryGroupAction + * Name for UpdateWorkloadGroupAction */ - public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_get"; + public static final String NAME = "cluster:admin/opensearch/wlm/workload_group/_update"; /** * Default constructor */ - private GetQueryGroupAction() { - super(NAME, GetQueryGroupResponse::new); + private UpdateWorkloadGroupAction() { + super(NAME, UpdateWorkloadGroupResponse::new); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequest.java new file mode 100644 index 0000000000000..1e99cebcf20b4 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequest.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.cluster.metadata.WorkloadGroup; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.wlm.MutableWorkloadGroupFragment; + +import java.io.IOException; + +/** + * A request for update WorkloadGroup + * + * @opensearch.experimental + */ +public class UpdateWorkloadGroupRequest extends ClusterManagerNodeRequest { + private final String name; + private final MutableWorkloadGroupFragment mutableWorkloadGroupFragment; + + /** + * Constructor for UpdateWorkloadGroupRequest + * @param name - WorkloadGroup name for UpdateWorkloadGroupRequest + * @param mutableWorkloadGroupFragment - MutableWorkloadGroupFragment for UpdateWorkloadGroupRequest + */ + UpdateWorkloadGroupRequest(String name, MutableWorkloadGroupFragment mutableWorkloadGroupFragment) { + this.name = name; + this.mutableWorkloadGroupFragment = mutableWorkloadGroupFragment; + } + + /** + * Constructor for UpdateWorkloadGroupRequest + * @param in - A {@link StreamInput} object + */ + UpdateWorkloadGroupRequest(StreamInput in) throws IOException { + this(in.readString(), new MutableWorkloadGroupFragment(in)); + } + + /** + * Generate a UpdateWorkloadGroupRequest from XContent + * @param parser - A {@link XContentParser} object + * @param name - name of the WorkloadGroup to be updated + */ + public static UpdateWorkloadGroupRequest fromXContent(XContentParser parser, String name) throws IOException { + WorkloadGroup.Builder builder = WorkloadGroup.Builder.fromXContent(parser); + return new UpdateWorkloadGroupRequest(name, builder.getMutableWorkloadGroupFragment()); + } + + @Override + public ActionRequestValidationException validate() { + WorkloadGroup.validateName(name); + return null; + } + + /** + * name getter + */ + public String getName() { + return name; + } + + /** + * mutableWorkloadGroupFragment getter + */ + public MutableWorkloadGroupFragment getmMutableWorkloadGroupFragment() { + return mutableWorkloadGroupFragment; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + 
mutableWorkloadGroupFragment.writeTo(out); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupResponse.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupResponse.java similarity index 60% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupResponse.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupResponse.java index 9071f52ecb5a7..9b8fccbdb5346 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupResponse.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupResponse.java @@ -8,7 +8,7 @@ package org.opensearch.plugin.wlm.action; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -20,49 +20,49 @@ import java.io.IOException; /** - * Response for the update API for QueryGroup + * Response for the update API for WorkloadGroup * * @opensearch.experimental */ -public class UpdateQueryGroupResponse extends ActionResponse implements ToXContent, ToXContentObject { - private final QueryGroup queryGroup; +public class UpdateWorkloadGroupResponse extends ActionResponse implements ToXContent, ToXContentObject { + private final WorkloadGroup workloadGroup; private final RestStatus restStatus; /** - * Constructor for UpdateQueryGroupResponse - * @param queryGroup - the QueryGroup to be updated + * Constructor for UpdateWorkloadGroupResponse + * @param workloadGroup - the WorkloadGroup to be updated * @param restStatus - the rest status for the response */ - public UpdateQueryGroupResponse(final QueryGroup queryGroup, RestStatus restStatus) { - this.queryGroup = queryGroup; + public UpdateWorkloadGroupResponse(final WorkloadGroup workloadGroup, RestStatus restStatus) { + this.workloadGroup = workloadGroup; this.restStatus = restStatus; } /** - * Constructor for UpdateQueryGroupResponse + * Constructor for UpdateWorkloadGroupResponse * @param in - a {@link StreamInput} object */ - public UpdateQueryGroupResponse(StreamInput in) throws IOException { - queryGroup = new QueryGroup(in); + public UpdateWorkloadGroupResponse(StreamInput in) throws IOException { + workloadGroup = new WorkloadGroup(in); restStatus = RestStatus.readFrom(in); } @Override public void writeTo(StreamOutput out) throws IOException { - queryGroup.writeTo(out); + workloadGroup.writeTo(out); RestStatus.writeTo(out, restStatus); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return queryGroup.toXContent(builder, params); + return workloadGroup.toXContent(builder, params); } /** - * queryGroup getter + * workloadGroup getter */ - public QueryGroup getQueryGroup() { - return queryGroup; + public WorkloadGroup getWorkloadGroup() { + return workloadGroup; } /** diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestUpdateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateWorkloadGroupAction.java similarity index 58% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestUpdateQueryGroupAction.java rename to 
plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateWorkloadGroupAction.java index 6b0d49cf868f4..5ef59602f7893 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestUpdateQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateWorkloadGroupAction.java @@ -11,9 +11,9 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.plugin.wlm.action.UpdateQueryGroupAction; -import org.opensearch.plugin.wlm.action.UpdateQueryGroupRequest; -import org.opensearch.plugin.wlm.action.UpdateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.CreateWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.CreateWorkloadGroupRequest; +import org.opensearch.plugin.wlm.action.CreateWorkloadGroupResponse; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; @@ -29,20 +29,20 @@ import static org.opensearch.rest.RestRequest.Method.PUT; /** - * Rest action to update a QueryGroup + * Rest action to create a WorkloadGroup * * @opensearch.experimental */ -public class RestUpdateQueryGroupAction extends BaseRestHandler { +public class RestCreateWorkloadGroupAction extends BaseRestHandler { /** - * Constructor for RestUpdateQueryGroupAction + * Constructor for RestCreateWorkloadGroupAction */ - public RestUpdateQueryGroupAction() {} + public RestCreateWorkloadGroupAction() {} @Override public String getName() { - return "update_query_group"; + return "create_workload_group"; } /** @@ -50,21 +50,25 @@ public String getName() { */ @Override public List routes() { - return List.of(new Route(POST, "_wlm/query_group/{name}"), new Route(PUT, "_wlm/query_group/{name}")); + return List.of(new Route(POST, "_wlm/workload_group/"), new Route(PUT, "_wlm/workload_group/")); } @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { try (XContentParser parser = request.contentParser()) { - UpdateQueryGroupRequest updateQueryGroupRequest = UpdateQueryGroupRequest.fromXContent(parser, request.param("name")); - return channel -> client.execute(UpdateQueryGroupAction.INSTANCE, updateQueryGroupRequest, updateQueryGroupResponse(channel)); + CreateWorkloadGroupRequest createWorkloadGroupRequest = CreateWorkloadGroupRequest.fromXContent(parser); + return channel -> client.execute( + CreateWorkloadGroupAction.INSTANCE, + createWorkloadGroupRequest, + createWorkloadGroupResponse(channel) + ); } } - private RestResponseListener updateQueryGroupResponse(final RestChannel channel) { + private RestResponseListener createWorkloadGroupResponse(final RestChannel channel) { return new RestResponseListener<>(channel) { @Override - public RestResponse buildResponse(final UpdateQueryGroupResponse response) throws Exception { + public RestResponse buildResponse(final CreateWorkloadGroupResponse response) throws Exception { return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)); } }; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupAction.java similarity index 50% rename from 
plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupAction.java index 264eeb5fcb974..d0d82f43679fa 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupAction.java @@ -8,8 +8,8 @@ package org.opensearch.plugin.wlm.rest; -import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; -import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupRequest; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -21,20 +21,20 @@ import static org.opensearch.rest.RestRequest.Method.DELETE; /** - * Rest action to delete a QueryGroup + * Rest action to delete a WorkloadGroup * * @opensearch.experimental */ -public class RestDeleteQueryGroupAction extends BaseRestHandler { +public class RestDeleteWorkloadGroupAction extends BaseRestHandler { /** - * Constructor for RestDeleteQueryGroupAction + * Constructor for RestDeleteWorkloadGroupAction */ - public RestDeleteQueryGroupAction() {} + public RestDeleteWorkloadGroupAction() {} @Override public String getName() { - return "delete_query_group"; + return "delete_workload_group"; } /** @@ -42,16 +42,20 @@ public String getName() { */ @Override public List routes() { - return List.of(new Route(DELETE, "_wlm/query_group/{name}")); + return List.of(new Route(DELETE, "_wlm/workload_group/{name}")); } @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - DeleteQueryGroupRequest deleteQueryGroupRequest = new DeleteQueryGroupRequest(request.param("name")); - deleteQueryGroupRequest.clusterManagerNodeTimeout( - request.paramAsTime("cluster_manager_timeout", deleteQueryGroupRequest.clusterManagerNodeTimeout()) + DeleteWorkloadGroupRequest deleteWorkloadGroupRequest = new DeleteWorkloadGroupRequest(request.param("name")); + deleteWorkloadGroupRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", deleteWorkloadGroupRequest.clusterManagerNodeTimeout()) + ); + deleteWorkloadGroupRequest.timeout(request.paramAsTime("timeout", deleteWorkloadGroupRequest.timeout())); + return channel -> client.execute( + DeleteWorkloadGroupAction.INSTANCE, + deleteWorkloadGroupRequest, + new RestToXContentListener<>(channel) ); - deleteQueryGroupRequest.timeout(request.paramAsTime("timeout", deleteQueryGroupRequest.timeout())); - return channel -> client.execute(DeleteQueryGroupAction.INSTANCE, deleteQueryGroupRequest, new RestToXContentListener<>(channel)); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetWorkloadGroupAction.java similarity index 58% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetWorkloadGroupAction.java index fecfbfe334a9c..818531352f4d3 100644 --- 
a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetWorkloadGroupAction.java @@ -10,9 +10,9 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.plugin.wlm.action.GetQueryGroupAction; -import org.opensearch.plugin.wlm.action.GetQueryGroupRequest; -import org.opensearch.plugin.wlm.action.GetQueryGroupResponse; +import org.opensearch.plugin.wlm.action.GetWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.GetWorkloadGroupRequest; +import org.opensearch.plugin.wlm.action.GetWorkloadGroupResponse; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; @@ -27,20 +27,20 @@ import static org.opensearch.rest.RestRequest.Method.GET; /** - * Rest action to get a QueryGroup + * Rest action to get a WorkloadGroup * * @opensearch.experimental */ -public class RestGetQueryGroupAction extends BaseRestHandler { +public class RestGetWorkloadGroupAction extends BaseRestHandler { /** - * Constructor for RestGetQueryGroupAction + * Constructor for RestGetWorkloadGroupAction */ - public RestGetQueryGroupAction() {} + public RestGetWorkloadGroupAction() {} @Override public String getName() { - return "get_query_group"; + return "get_workload_group"; } /** @@ -48,19 +48,19 @@ public String getName() { */ @Override public List routes() { - return List.of(new Route(GET, "_wlm/query_group/{name}"), new Route(GET, "_wlm/query_group/")); + return List.of(new Route(GET, "_wlm/workload_group/{name}"), new Route(GET, "_wlm/workload_group/")); } @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - final GetQueryGroupRequest getQueryGroupRequest = new GetQueryGroupRequest(request.param("name")); - return channel -> client.execute(GetQueryGroupAction.INSTANCE, getQueryGroupRequest, getQueryGroupResponse(channel)); + final GetWorkloadGroupRequest getWorkloadGroupRequest = new GetWorkloadGroupRequest(request.param("name")); + return channel -> client.execute(GetWorkloadGroupAction.INSTANCE, getWorkloadGroupRequest, getWorkloadGroupResponse(channel)); } - private RestResponseListener getQueryGroupResponse(final RestChannel channel) { + private RestResponseListener getWorkloadGroupResponse(final RestChannel channel) { return new RestResponseListener<>(channel) { @Override - public RestResponse buildResponse(final GetQueryGroupResponse response) throws Exception { + public RestResponse buildResponse(final GetWorkloadGroupResponse response) throws Exception { return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)); } }; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestUpdateWorkloadGroupAction.java similarity index 58% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestUpdateWorkloadGroupAction.java index 2a3f79c1c6791..db77dc5963037 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java +++ 
b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestUpdateWorkloadGroupAction.java @@ -11,9 +11,9 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.plugin.wlm.action.CreateQueryGroupAction; -import org.opensearch.plugin.wlm.action.CreateQueryGroupRequest; -import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupRequest; +import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupResponse; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; @@ -29,20 +29,20 @@ import static org.opensearch.rest.RestRequest.Method.PUT; /** - * Rest action to create a QueryGroup + * Rest action to update a WorkloadGroup * * @opensearch.experimental */ -public class RestCreateQueryGroupAction extends BaseRestHandler { +public class RestUpdateWorkloadGroupAction extends BaseRestHandler { /** - * Constructor for RestCreateQueryGroupAction + * Constructor for RestUpdateWorkloadGroupAction */ - public RestCreateQueryGroupAction() {} + public RestUpdateWorkloadGroupAction() {} @Override public String getName() { - return "create_query_group"; + return "update_workload_group"; } /** @@ -50,21 +50,25 @@ public String getName() { */ @Override public List routes() { - return List.of(new Route(POST, "_wlm/query_group/"), new Route(PUT, "_wlm/query_group/")); + return List.of(new Route(POST, "_wlm/workload_group/{name}"), new Route(PUT, "_wlm/workload_group/{name}")); } @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { try (XContentParser parser = request.contentParser()) { - CreateQueryGroupRequest createQueryGroupRequest = CreateQueryGroupRequest.fromXContent(parser); - return channel -> client.execute(CreateQueryGroupAction.INSTANCE, createQueryGroupRequest, createQueryGroupResponse(channel)); + UpdateWorkloadGroupRequest updateWorkloadGroupRequest = UpdateWorkloadGroupRequest.fromXContent(parser, request.param("name")); + return channel -> client.execute( + UpdateWorkloadGroupAction.INSTANCE, + updateWorkloadGroupRequest, + updateWorkloadGroupResponse(channel) + ); } } - private RestResponseListener createQueryGroupResponse(final RestChannel channel) { + private RestResponseListener updateWorkloadGroupResponse(final RestChannel channel) { return new RestResponseListener<>(channel) { @Override - public RestResponse buildResponse(final CreateQueryGroupResponse response) throws Exception { + public RestResponse buildResponse(final UpdateWorkloadGroupResponse response) throws Exception { return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)); } }; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceService.java similarity index 50% rename from plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceService.java index 73dff306d0e69..35a7dc7f3219f 100644 --- 
a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceService.java @@ -16,7 +16,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterManagerTaskThrottler.ThrottlingKey; import org.opensearch.cluster.service.ClusterService; @@ -27,11 +27,11 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; -import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; -import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; -import org.opensearch.plugin.wlm.action.UpdateQueryGroupRequest; -import org.opensearch.plugin.wlm.action.UpdateQueryGroupResponse; -import org.opensearch.wlm.MutableQueryGroupFragment; +import org.opensearch.plugin.wlm.action.CreateWorkloadGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupRequest; +import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupRequest; +import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupResponse; +import org.opensearch.wlm.MutableWorkloadGroupFragment; import org.opensearch.wlm.ResourceType; import java.util.Collection; @@ -40,112 +40,112 @@ import java.util.Optional; import java.util.stream.Collectors; -import static org.opensearch.cluster.metadata.QueryGroup.updateExistingQueryGroup; +import static org.opensearch.cluster.metadata.WorkloadGroup.updateExistingWorkloadGroup; /** - * This class defines the functions for QueryGroup persistence + * This class defines the functions for WorkloadGroup persistence */ -public class QueryGroupPersistenceService { +public class WorkloadGroupPersistenceService { static final String SOURCE = "query-group-persistence-service"; private static final String CREATE_QUERY_GROUP_THROTTLING_KEY = "create-query-group"; private static final String DELETE_QUERY_GROUP_THROTTLING_KEY = "delete-query-group"; private static final String UPDATE_QUERY_GROUP_THROTTLING_KEY = "update-query-group"; - private static final Logger logger = LogManager.getLogger(QueryGroupPersistenceService.class); + private static final Logger logger = LogManager.getLogger(WorkloadGroupPersistenceService.class); /** - * max QueryGroup count setting name + * max WorkloadGroup count setting name */ - public static final String QUERY_GROUP_COUNT_SETTING_NAME = "node.query_group.max_count"; + public static final String QUERY_GROUP_COUNT_SETTING_NAME = "node.workload_group.max_count"; /** - * default max queryGroup count on any node at any given point in time + * default max workloadGroup count on any node at any given point in time */ private static final int DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE = 100; /** - * min queryGroup count on any node at any given point in time + * min workloadGroup count on any node at any given point in time */ private static final int MIN_QUERY_GROUP_COUNT_VALUE = 1; /** - * max QueryGroup count setting + * max WorkloadGroup count setting */ public static final Setting MAX_QUERY_GROUP_COUNT = Setting.intSetting( QUERY_GROUP_COUNT_SETTING_NAME, DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE, 0, - 
QueryGroupPersistenceService::validateMaxQueryGroupCount, + WorkloadGroupPersistenceService::validateMaxWorkloadGroupCount, Setting.Property.Dynamic, Setting.Property.NodeScope ); private final ClusterService clusterService; - private volatile int maxQueryGroupCount; - final ThrottlingKey createQueryGroupThrottlingKey; - final ThrottlingKey deleteQueryGroupThrottlingKey; - final ThrottlingKey updateQueryGroupThrottlingKey; + private volatile int maxWorkloadGroupCount; + final ThrottlingKey createWorkloadGroupThrottlingKey; + final ThrottlingKey deleteWorkloadGroupThrottlingKey; + final ThrottlingKey updateWorkloadGroupThrottlingKey; /** - * Constructor for QueryGroupPersistenceService + * Constructor for WorkloadGroupPersistenceService * - * @param clusterService {@link ClusterService} - The cluster service to be used by QueryGroupPersistenceService - * @param settings {@link Settings} - The settings to be used by QueryGroupPersistenceService - * @param clusterSettings {@link ClusterSettings} - The cluster settings to be used by QueryGroupPersistenceService + * @param clusterService {@link ClusterService} - The cluster service to be used by WorkloadGroupPersistenceService + * @param settings {@link Settings} - The settings to be used by WorkloadGroupPersistenceService + * @param clusterSettings {@link ClusterSettings} - The cluster settings to be used by WorkloadGroupPersistenceService */ @Inject - public QueryGroupPersistenceService( + public WorkloadGroupPersistenceService( final ClusterService clusterService, final Settings settings, final ClusterSettings clusterSettings ) { this.clusterService = clusterService; - this.createQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(CREATE_QUERY_GROUP_THROTTLING_KEY, true); - this.deleteQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(DELETE_QUERY_GROUP_THROTTLING_KEY, true); - this.updateQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(UPDATE_QUERY_GROUP_THROTTLING_KEY, true); - setMaxQueryGroupCount(MAX_QUERY_GROUP_COUNT.get(settings)); - clusterSettings.addSettingsUpdateConsumer(MAX_QUERY_GROUP_COUNT, this::setMaxQueryGroupCount); + this.createWorkloadGroupThrottlingKey = clusterService.registerClusterManagerTask(CREATE_QUERY_GROUP_THROTTLING_KEY, true); + this.deleteWorkloadGroupThrottlingKey = clusterService.registerClusterManagerTask(DELETE_QUERY_GROUP_THROTTLING_KEY, true); + this.updateWorkloadGroupThrottlingKey = clusterService.registerClusterManagerTask(UPDATE_QUERY_GROUP_THROTTLING_KEY, true); + setMaxWorkloadGroupCount(MAX_QUERY_GROUP_COUNT.get(settings)); + clusterSettings.addSettingsUpdateConsumer(MAX_QUERY_GROUP_COUNT, this::setMaxWorkloadGroupCount); } /** - * Set maxQueryGroupCount to be newMaxQueryGroupCount - * @param newMaxQueryGroupCount - the max number of QueryGroup allowed + * Set maxWorkloadGroupCount to be newMaxWorkloadGroupCount + * @param newMaxWorkloadGroupCount - the max number of WorkloadGroup allowed */ - public void setMaxQueryGroupCount(int newMaxQueryGroupCount) { - validateMaxQueryGroupCount(newMaxQueryGroupCount); - this.maxQueryGroupCount = newMaxQueryGroupCount; + public void setMaxWorkloadGroupCount(int newMaxWorkloadGroupCount) { + validateMaxWorkloadGroupCount(newMaxWorkloadGroupCount); + this.maxWorkloadGroupCount = newMaxWorkloadGroupCount; } /** - * Validator for maxQueryGroupCount - * @param maxQueryGroupCount - the maxQueryGroupCount number to be verified + * Validator for maxWorkloadGroupCount + * @param maxWorkloadGroupCount - the 
maxWorkloadGroupCount number to be verified */ - private static void validateMaxQueryGroupCount(int maxQueryGroupCount) { - if (maxQueryGroupCount > DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE || maxQueryGroupCount < MIN_QUERY_GROUP_COUNT_VALUE) { + private static void validateMaxWorkloadGroupCount(int maxWorkloadGroupCount) { + if (maxWorkloadGroupCount > DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE || maxWorkloadGroupCount < MIN_QUERY_GROUP_COUNT_VALUE) { throw new IllegalArgumentException(QUERY_GROUP_COUNT_SETTING_NAME + " should be in range [1-100]."); } } /** - * Update cluster state to include the new QueryGroup - * @param queryGroup {@link QueryGroup} - the QueryGroup we're currently creating - * @param listener - ActionListener for CreateQueryGroupResponse + * Update cluster state to include the new WorkloadGroup + * @param workloadGroup {@link WorkloadGroup} - the WorkloadGroup we're currently creating + * @param listener - ActionListener for CreateWorkloadGroupResponse */ - public void persistInClusterStateMetadata(QueryGroup queryGroup, ActionListener listener) { + public void persistInClusterStateMetadata(WorkloadGroup workloadGroup, ActionListener listener) { clusterService.submitStateUpdateTask(SOURCE, new ClusterStateUpdateTask(Priority.NORMAL) { @Override public ClusterState execute(ClusterState currentState) throws Exception { - return saveQueryGroupInClusterState(queryGroup, currentState); + return saveWorkloadGroupInClusterState(workloadGroup, currentState); } @Override public ThrottlingKey getClusterManagerThrottlingKey() { - return createQueryGroupThrottlingKey; + return createWorkloadGroupThrottlingKey; } @Override public void onFailure(String source, Exception e) { - logger.warn("failed to save QueryGroup object due to error: {}, for source: {}.", e.getMessage(), source); + logger.warn("failed to save WorkloadGroup object due to error: {}, for source: {}.", e.getMessage(), source); listener.onFailure(e); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - CreateQueryGroupResponse response = new CreateQueryGroupResponse(queryGroup, RestStatus.OK); + CreateWorkloadGroupResponse response = new CreateWorkloadGroupResponse(workloadGroup, RestStatus.OK); listener.onResponse(response); } }); @@ -153,44 +153,44 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS /** * This method will be executed before we submit the new cluster state - * @param queryGroup - the QueryGroup we're currently creating + * @param workloadGroup - the WorkloadGroup we're currently creating * @param currentClusterState - the cluster state before the update */ - ClusterState saveQueryGroupInClusterState(final QueryGroup queryGroup, final ClusterState currentClusterState) { - final Map existingQueryGroups = currentClusterState.metadata().queryGroups(); - String groupName = queryGroup.getName(); - - // check if maxQueryGroupCount will breach - if (existingQueryGroups.size() == maxQueryGroupCount) { - logger.warn("{} value exceeded its assigned limit of {}.", QUERY_GROUP_COUNT_SETTING_NAME, maxQueryGroupCount); - throw new IllegalStateException("Can't create more than " + maxQueryGroupCount + " QueryGroups in the system."); + ClusterState saveWorkloadGroupInClusterState(final WorkloadGroup workloadGroup, final ClusterState currentClusterState) { + final Map existingWorkloadGroups = currentClusterState.metadata().workloadGroups(); + String groupName = workloadGroup.getName(); + + // check if maxWorkloadGroupCount will breach + if 
(existingWorkloadGroups.size() == maxWorkloadGroupCount) {
+            logger.warn("{} value exceeded its assigned limit of {}.", QUERY_GROUP_COUNT_SETTING_NAME, maxWorkloadGroupCount);
+            throw new IllegalStateException("Can't create more than " + maxWorkloadGroupCount + " WorkloadGroups in the system.");
         }
         // check for duplicate name
-        Optional findExistingGroup = existingQueryGroups.values()
+        Optional findExistingGroup = existingWorkloadGroups.values()
             .stream()
             .filter(group -> group.getName().equals(groupName))
             .findFirst();
         if (findExistingGroup.isPresent()) {
-            logger.warn("QueryGroup with name {} already exists. Not creating a new one.", groupName);
-            throw new IllegalArgumentException("QueryGroup with name " + groupName + " already exists. Not creating a new one.");
+            logger.warn("WorkloadGroup with name {} already exists. Not creating a new one.", groupName);
+            throw new IllegalArgumentException("WorkloadGroup with name " + groupName + " already exists. Not creating a new one.");
         }
         // check if there's any resource allocation that exceed limit of 1.0
-        validateTotalUsage(existingQueryGroups, groupName, queryGroup.getResourceLimits());
+        validateTotalUsage(existingWorkloadGroups, groupName, workloadGroup.getResourceLimits());
         return ClusterState.builder(currentClusterState)
-            .metadata(Metadata.builder(currentClusterState.metadata()).put(queryGroup).build())
+            .metadata(Metadata.builder(currentClusterState.metadata()).put(workloadGroup).build())
             .build();
     }

     /**
-     * Get the QueryGroups with the specified name from cluster state
-     * @param name - the QueryGroup name we are getting
+     * Get the WorkloadGroups with the specified name from cluster state
+     * @param name - the WorkloadGroup name we are getting
      * @param currentState - current cluster state
      */
-    public static Collection getFromClusterStateMetadata(String name, ClusterState currentState) {
-        final Map currentGroups = currentState.getMetadata().queryGroups();
+    public static Collection getFromClusterStateMetadata(String name, ClusterState currentState) {
+        final Map currentGroups = currentState.getMetadata().workloadGroups();
         if (name == null || name.isEmpty()) {
             return currentGroups.values();
         }
@@ -203,23 +203,23 @@ public static Collection getFromClusterStateMetadata(String name, Cl
     }

     /**
-     * Modify cluster state to delete the QueryGroup
-     * @param deleteQueryGroupRequest - request to delete a QueryGroup
+     * Modify cluster state to delete the WorkloadGroup
+     * @param deleteWorkloadGroupRequest - request to delete a WorkloadGroup
      * @param listener - ActionListener for AcknowledgedResponse
      */
     public void deleteInClusterStateMetadata(
-        DeleteQueryGroupRequest deleteQueryGroupRequest,
+        DeleteWorkloadGroupRequest deleteWorkloadGroupRequest,
         ActionListener listener
     ) {
-        clusterService.submitStateUpdateTask(SOURCE, new AckedClusterStateUpdateTask<>(deleteQueryGroupRequest, listener) {
+        clusterService.submitStateUpdateTask(SOURCE, new AckedClusterStateUpdateTask<>(deleteWorkloadGroupRequest, listener) {
             @Override
             public ClusterState execute(ClusterState currentState) {
-                return deleteQueryGroupInClusterState(deleteQueryGroupRequest.getName(), currentState);
+                return deleteWorkloadGroupInClusterState(deleteWorkloadGroupRequest.getName(), currentState);
             }

             @Override
             public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey() {
-                return deleteQueryGroupThrottlingKey;
+                return deleteWorkloadGroupThrottlingKey;
             }

             @Override
@@ -230,85 +230,88 @@ protected AcknowledgedResponse newResponse(boolean acknowledged) {
     }

     /**
-     * Modify cluster state to delete the QueryGroup, and return the new cluster state
-     * @param name - the name for QueryGroup to be deleted
+     * Modify cluster state to delete the WorkloadGroup, and return the new cluster state
+     * @param name - the name for WorkloadGroup to be deleted
      * @param currentClusterState - current cluster state
      */
-    ClusterState deleteQueryGroupInClusterState(final String name, final ClusterState currentClusterState) {
+    ClusterState deleteWorkloadGroupInClusterState(final String name, final ClusterState currentClusterState) {
         final Metadata metadata = currentClusterState.metadata();
-        final QueryGroup queryGroupToRemove = metadata.queryGroups()
+        final WorkloadGroup workloadGroupToRemove = metadata.workloadGroups()
             .values()
             .stream()
-            .filter(queryGroup -> queryGroup.getName().equals(name))
+            .filter(workloadGroup -> workloadGroup.getName().equals(name))
             .findAny()
-            .orElseThrow(() -> new ResourceNotFoundException("No QueryGroup exists with the provided name: " + name));
+            .orElseThrow(() -> new ResourceNotFoundException("No WorkloadGroup exists with the provided name: " + name));

-        return ClusterState.builder(currentClusterState).metadata(Metadata.builder(metadata).remove(queryGroupToRemove).build()).build();
+        return ClusterState.builder(currentClusterState).metadata(Metadata.builder(metadata).remove(workloadGroupToRemove).build()).build();
     }

     /**
-     * Modify cluster state to update the QueryGroup
-     * @param toUpdateGroup {@link QueryGroup} - the QueryGroup that we want to update
-     * @param listener - ActionListener for UpdateQueryGroupResponse
+     * Modify cluster state to update the WorkloadGroup
+     * @param toUpdateGroup {@link WorkloadGroup} - the WorkloadGroup that we want to update
+     * @param listener - ActionListener for UpdateWorkloadGroupResponse
      */
-    public void updateInClusterStateMetadata(UpdateQueryGroupRequest toUpdateGroup, ActionListener listener) {
+    public void updateInClusterStateMetadata(
+        UpdateWorkloadGroupRequest toUpdateGroup,
+        ActionListener listener
+    ) {
         clusterService.submitStateUpdateTask(SOURCE, new ClusterStateUpdateTask(Priority.NORMAL) {
             @Override
             public ClusterState execute(ClusterState currentState) {
-                return updateQueryGroupInClusterState(toUpdateGroup, currentState);
+                return updateWorkloadGroupInClusterState(toUpdateGroup, currentState);
             }

             @Override
             public ThrottlingKey getClusterManagerThrottlingKey() {
-                return updateQueryGroupThrottlingKey;
+                return updateWorkloadGroupThrottlingKey;
             }

             @Override
             public void onFailure(String source, Exception e) {
-                logger.warn("Failed to update QueryGroup due to error: {}, for source: {}", e.getMessage(), source);
+                logger.warn("Failed to update WorkloadGroup due to error: {}, for source: {}", e.getMessage(), source);
                 listener.onFailure(e);
             }

             @Override
             public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                 String name = toUpdateGroup.getName();
-                Optional findUpdatedGroup = newState.metadata()
-                    .queryGroups()
+                Optional findUpdatedGroup = newState.metadata()
+                    .workloadGroups()
                     .values()
                     .stream()
                     .filter(group -> group.getName().equals(name))
                     .findFirst();
                 assert findUpdatedGroup.isPresent();
-                QueryGroup updatedGroup = findUpdatedGroup.get();
-                UpdateQueryGroupResponse response = new UpdateQueryGroupResponse(updatedGroup, RestStatus.OK);
+                WorkloadGroup updatedGroup = findUpdatedGroup.get();
+                UpdateWorkloadGroupResponse response = new UpdateWorkloadGroupResponse(updatedGroup, RestStatus.OK);
                 listener.onResponse(response);
             }
         });
     }

     /**
-     * Modify cluster state to update the existing QueryGroup
-     * @param updateQueryGroupRequest {@link QueryGroup} - the QueryGroup that we want to update
+     * Modify cluster state to update the existing WorkloadGroup
+     * @param updateWorkloadGroupRequest {@link WorkloadGroup} - the WorkloadGroup that we want to update
      * @param currentState - current cluster state
      */
-    ClusterState updateQueryGroupInClusterState(UpdateQueryGroupRequest updateQueryGroupRequest, ClusterState currentState) {
+    ClusterState updateWorkloadGroupInClusterState(UpdateWorkloadGroupRequest updateWorkloadGroupRequest, ClusterState currentState) {
         final Metadata metadata = currentState.metadata();
-        final Map existingGroups = currentState.metadata().queryGroups();
-        String name = updateQueryGroupRequest.getName();
-        MutableQueryGroupFragment mutableQueryGroupFragment = updateQueryGroupRequest.getmMutableQueryGroupFragment();
+        final Map existingGroups = currentState.metadata().workloadGroups();
+        String name = updateWorkloadGroupRequest.getName();
+        MutableWorkloadGroupFragment mutableWorkloadGroupFragment = updateWorkloadGroupRequest.getmMutableWorkloadGroupFragment();

-        final QueryGroup existingGroup = existingGroups.values()
+        final WorkloadGroup existingGroup = existingGroups.values()
             .stream()
             .filter(group -> group.getName().equals(name))
             .findFirst()
-            .orElseThrow(() -> new ResourceNotFoundException("No QueryGroup exists with the provided name: " + name));
+            .orElseThrow(() -> new ResourceNotFoundException("No WorkloadGroup exists with the provided name: " + name));

-        validateTotalUsage(existingGroups, name, mutableQueryGroupFragment.getResourceLimits());
+        validateTotalUsage(existingGroups, name, mutableWorkloadGroupFragment.getResourceLimits());
         return ClusterState.builder(currentState)
             .metadata(
                 Metadata.builder(metadata)
                     .remove(existingGroup)
-                    .put(updateExistingQueryGroup(existingGroup, mutableQueryGroupFragment))
+                    .put(updateExistingWorkloadGroup(existingGroup, mutableWorkloadGroupFragment))
                     .build()
             )
             .build();
@@ -316,16 +319,20 @@ ClusterState updateQueryGroupInClusterState(UpdateQueryGroupRequest updateQueryG

     /**
      * This method checks if there's any resource allocation that exceed limit of 1.0
-     * @param existingQueryGroups - existing QueryGroups in the system
-     * @param resourceLimits - the QueryGroup we're creating or updating
+     * @param existingWorkloadGroups - existing WorkloadGroups in the system
+     * @param resourceLimits - the WorkloadGroup we're creating or updating
      */
-    private void validateTotalUsage(Map existingQueryGroups, String name, Map resourceLimits) {
+    private void validateTotalUsage(
+        Map existingWorkloadGroups,
+        String name,
+        Map resourceLimits
+    ) {
         if (resourceLimits == null || resourceLimits.isEmpty()) {
             return;
         }
         final Map totalUsage = new EnumMap<>(ResourceType.class);
         totalUsage.putAll(resourceLimits);
-        for (QueryGroup currGroup : existingQueryGroups.values()) {
+        for (WorkloadGroup currGroup : existingWorkloadGroups.values()) {
             if (!currGroup.getName().equals(name)) {
                 for (ResourceType resourceType : resourceLimits.keySet()) {
                     totalUsage.compute(resourceType, (k, v) -> v + currGroup.getResourceLimits().getOrDefault(resourceType, 0.0));
@@ -343,10 +350,10 @@ private void validateTotalUsage(Map existingQueryGroups, Str
     }

     /**
-     * maxQueryGroupCount getter
+     * maxWorkloadGroupCount getter
      */
-    public int getMaxQueryGroupCount() {
-        return maxQueryGroupCount;
+    public int getMaxWorkloadGroupCount() {
+        return maxWorkloadGroupCount;
     }

     /**
diff --git
a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadGroupTestUtils.java similarity index 62% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadGroupTestUtils.java index c6eb3140e943d..bac644a172c1e 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadGroupTestUtils.java @@ -11,7 +11,7 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.ClusterService; @@ -19,9 +19,9 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.wlm.MutableQueryGroupFragment; +import org.opensearch.wlm.MutableWorkloadGroupFragment; import org.opensearch.wlm.ResourceType; import java.util.ArrayList; @@ -32,52 +32,52 @@ import java.util.Map; import java.util.Set; -import static org.opensearch.cluster.metadata.QueryGroup.builder; +import static org.opensearch.cluster.metadata.WorkloadGroup.builder; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; -public class QueryGroupTestUtils { - public static final String NAME_ONE = "query_group_one"; - public static final String NAME_TWO = "query_group_two"; +public class WorkloadGroupTestUtils { + public static final String NAME_ONE = "workload_group_one"; + public static final String NAME_TWO = "workload_group_two"; public static final String _ID_ONE = "AgfUO5Ja9yfsYlONlYi3TQ=="; public static final String _ID_TWO = "G5iIqHy4g7eK1qIAAAAIH53=1"; - public static final String NAME_NONE_EXISTED = "query_group_none_existed"; + public static final String NAME_NONE_EXISTED = "workload_group_none_existed"; public static final long TIMESTAMP_ONE = 4513232413L; public static final long TIMESTAMP_TWO = 4513232415L; - public static final QueryGroup queryGroupOne = builder().name(NAME_ONE) + public static final WorkloadGroup workloadGroupOne = builder().name(NAME_ONE) ._id(_ID_ONE) - .mutableQueryGroupFragment( - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.3)) + .mutableWorkloadGroupFragment( + new MutableWorkloadGroupFragment(MutableWorkloadGroupFragment.ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.3)) ) .updatedAt(TIMESTAMP_ONE) .build(); - public static final QueryGroup queryGroupTwo = builder().name(NAME_TWO) + public static final WorkloadGroup workloadGroupTwo = builder().name(NAME_TWO) ._id(_ID_TWO) - .mutableQueryGroupFragment( - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.6)) + 
.mutableWorkloadGroupFragment( + new MutableWorkloadGroupFragment(MutableWorkloadGroupFragment.ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.6)) ) .updatedAt(TIMESTAMP_TWO) .build(); - public static List queryGroupList() { - List list = new ArrayList<>(); - list.add(queryGroupOne); - list.add(queryGroupTwo); + public static List workloadGroupList() { + List list = new ArrayList<>(); + list.add(workloadGroupOne); + list.add(workloadGroupTwo); return list; } public static ClusterState clusterState() { - final Metadata metadata = Metadata.builder().queryGroups(Map.of(_ID_ONE, queryGroupOne, _ID_TWO, queryGroupTwo)).build(); + final Metadata metadata = Metadata.builder().workloadGroups(Map.of(_ID_ONE, workloadGroupOne, _ID_TWO, workloadGroupTwo)).build(); return ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); } public static Set> clusterSettingsSet() { Set> set = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - set.add(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT); - assertFalse(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT)); + set.add(WorkloadGroupPersistenceService.MAX_QUERY_GROUP_COUNT); + assertFalse(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(WorkloadGroupPersistenceService.MAX_QUERY_GROUP_COUNT)); return set; } @@ -89,7 +89,7 @@ public static ClusterSettings clusterSettings() { return new ClusterSettings(settings(), clusterSettingsSet()); } - public static QueryGroupPersistenceService queryGroupPersistenceService() { + public static WorkloadGroupPersistenceService workloadGroupPersistenceService() { ClusterApplierService clusterApplierService = new ClusterApplierService( "name", settings(), @@ -103,11 +103,13 @@ public static QueryGroupPersistenceService queryGroupPersistenceService() { mock(ClusterManagerService.class), clusterApplierService ); - return new QueryGroupPersistenceService(clusterService, settings(), clusterSettings()); + return new WorkloadGroupPersistenceService(clusterService, settings(), clusterSettings()); } - public static Tuple preparePersistenceServiceSetup(Map queryGroups) { - Metadata metadata = Metadata.builder().queryGroups(queryGroups).build(); + public static Tuple preparePersistenceServiceSetup( + Map workloadGroups + ) { + Metadata metadata = Metadata.builder().workloadGroups(workloadGroups).build(); Settings settings = Settings.builder().build(); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); @@ -124,12 +126,12 @@ public static Tuple preparePersisten mock(ClusterManagerService.class), clusterApplierService ); - QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( clusterService, settings, clusterSettings ); - return new Tuple(queryGroupPersistenceService, clusterState); + return new Tuple(workloadGroupPersistenceService, clusterState); } public static void assertEqualResourceLimits( @@ -140,20 +142,20 @@ public static void assertEqualResourceLimits( assertTrue(resourceLimitMapOne.values().containsAll(resourceLimitMapTwo.values())); } - public static void assertEqualQueryGroups( - Collection collectionOne, - Collection collectionTwo, + public static void assertEqualWorkloadGroups( + Collection collectionOne, + Collection collectionTwo, boolean assertUpdateAt ) 
{ assertEquals(collectionOne.size(), collectionTwo.size()); - List listOne = new ArrayList<>(collectionOne); - List listTwo = new ArrayList<>(collectionTwo); - listOne.sort(Comparator.comparing(QueryGroup::getName)); - listTwo.sort(Comparator.comparing(QueryGroup::getName)); + List listOne = new ArrayList<>(collectionOne); + List listTwo = new ArrayList<>(collectionTwo); + listOne.sort(Comparator.comparing(WorkloadGroup::getName)); + listTwo.sort(Comparator.comparing(WorkloadGroup::getName)); for (int i = 0; i < listOne.size(); i++) { if (assertUpdateAt) { - QueryGroup one = listOne.get(i); - QueryGroup two = listTwo.get(i); + WorkloadGroup one = listOne.get(i); + WorkloadGroup two = listTwo.get(i); assertEquals(one.getName(), two.getName()); assertEquals(one.getResourceLimits(), two.getResourceLimits()); assertEquals(one.getResiliencyMode(), two.getResiliencyMode()); diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupRequestTests.java similarity index 50% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupRequestTests.java index dd9de4bf8fb1a..31d3ea00b7bda 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupRequestTests.java @@ -8,7 +8,7 @@ package org.opensearch.plugin.wlm.action; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; @@ -17,24 +17,24 @@ import java.util.ArrayList; import java.util.List; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.assertEqualWorkloadGroups; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupOne; -public class CreateQueryGroupRequestTests extends OpenSearchTestCase { +public class CreateWorkloadGroupRequestTests extends OpenSearchTestCase { /** - * Test case to verify the serialization and deserialization of CreateQueryGroupRequest. + * Test case to verify the serialization and deserialization of CreateWorkloadGroupRequest. 
*/ public void testSerialization() throws IOException { - CreateQueryGroupRequest request = new CreateQueryGroupRequest(queryGroupOne); + CreateWorkloadGroupRequest request = new CreateWorkloadGroupRequest(workloadGroupOne); BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - CreateQueryGroupRequest otherRequest = new CreateQueryGroupRequest(streamInput); - List list1 = new ArrayList<>(); - List list2 = new ArrayList<>(); - list1.add(queryGroupOne); - list2.add(otherRequest.getQueryGroup()); - assertEqualQueryGroups(list1, list2, false); + CreateWorkloadGroupRequest otherRequest = new CreateWorkloadGroupRequest(streamInput); + List list1 = new ArrayList<>(); + List list2 = new ArrayList<>(); + list1.add(workloadGroupOne); + list2.add(otherRequest.getWorkloadGroup()); + assertEqualWorkloadGroups(list1, list2, false); } } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupResponseTests.java similarity index 60% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupResponseTests.java index 3a2ce215d21b5..d25050341f997 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupResponseTests.java @@ -8,14 +8,14 @@ package org.opensearch.plugin.wlm.action; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -24,37 +24,37 @@ import static org.mockito.Mockito.mock; -public class CreateQueryGroupResponseTests extends OpenSearchTestCase { +public class CreateWorkloadGroupResponseTests extends OpenSearchTestCase { /** - * Test case to verify serialization and deserialization of CreateQueryGroupResponse. + * Test case to verify serialization and deserialization of CreateWorkloadGroupResponse. 
*/ public void testSerialization() throws IOException { - CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK); + CreateWorkloadGroupResponse response = new CreateWorkloadGroupResponse(WorkloadGroupTestUtils.workloadGroupOne, RestStatus.OK); BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - CreateQueryGroupResponse otherResponse = new CreateQueryGroupResponse(streamInput); + CreateWorkloadGroupResponse otherResponse = new CreateWorkloadGroupResponse(streamInput); assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); - QueryGroup responseGroup = response.getQueryGroup(); - QueryGroup otherResponseGroup = otherResponse.getQueryGroup(); - List listOne = new ArrayList<>(); - List listTwo = new ArrayList<>(); + WorkloadGroup responseGroup = response.getWorkloadGroup(); + WorkloadGroup otherResponseGroup = otherResponse.getWorkloadGroup(); + List listOne = new ArrayList<>(); + List listTwo = new ArrayList<>(); listOne.add(responseGroup); listTwo.add(otherResponseGroup); - QueryGroupTestUtils.assertEqualQueryGroups(listOne, listTwo, false); + WorkloadGroupTestUtils.assertEqualWorkloadGroups(listOne, listTwo, false); } /** - * Test case to validate the toXContent method of CreateQueryGroupResponse. + * Test case to validate the toXContent method of CreateWorkloadGroupResponse. */ - public void testToXContentCreateQueryGroup() throws IOException { + public void testToXContentCreateWorkloadGroup() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); - CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK); + CreateWorkloadGroupResponse response = new CreateWorkloadGroupResponse(WorkloadGroupTestUtils.workloadGroupOne, RestStatus.OK); String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); String expected = "{\n" + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" - + " \"name\" : \"query_group_one\",\n" + + " \"name\" : \"workload_group_one\",\n" + " \"resiliency_mode\" : \"monitor\",\n" + " \"resource_limits\" : {\n" + " \"memory\" : 0.3\n" diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupRequestTests.java similarity index 65% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupRequestTests.java index bc2e4f0faca4c..a7fa0939583c5 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupRequestTests.java @@ -11,31 +11,31 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; -public class DeleteQueryGroupRequestTests extends OpenSearchTestCase { +public class DeleteWorkloadGroupRequestTests extends OpenSearchTestCase 
{ /** - * Test case to verify the serialization and deserialization of DeleteQueryGroupRequest. + * Test case to verify the serialization and deserialization of DeleteWorkloadGroupRequest. */ public void testSerialization() throws IOException { - DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(QueryGroupTestUtils.NAME_ONE); - assertEquals(QueryGroupTestUtils.NAME_ONE, request.getName()); + DeleteWorkloadGroupRequest request = new DeleteWorkloadGroupRequest(WorkloadGroupTestUtils.NAME_ONE); + assertEquals(WorkloadGroupTestUtils.NAME_ONE, request.getName()); BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - DeleteQueryGroupRequest otherRequest = new DeleteQueryGroupRequest(streamInput); + DeleteWorkloadGroupRequest otherRequest = new DeleteWorkloadGroupRequest(streamInput); assertEquals(request.getName(), otherRequest.getName()); } /** - * Test case to validate a DeleteQueryGroupRequest. + * Test case to validate a DeleteWorkloadGroupRequest. */ public void testSerializationWithNull() throws IOException { - DeleteQueryGroupRequest request = new DeleteQueryGroupRequest((String) null); + DeleteWorkloadGroupRequest request = new DeleteWorkloadGroupRequest((String) null); ActionRequestValidationException actionRequestValidationException = request.validate(); assertFalse(actionRequestValidationException.getMessage().isEmpty()); } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupRequestTests.java similarity index 62% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequestTests.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupRequestTests.java index 32b5f7ec9e2c3..832761d5084bb 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequestTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupRequestTests.java @@ -10,44 +10,44 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; -public class GetQueryGroupRequestTests extends OpenSearchTestCase { +public class GetWorkloadGroupRequestTests extends OpenSearchTestCase { /** - * Test case to verify the serialization and deserialization of GetQueryGroupRequest. + * Test case to verify the serialization and deserialization of GetWorkloadGroupRequest. 
*/ public void testSerialization() throws IOException { - GetQueryGroupRequest request = new GetQueryGroupRequest(QueryGroupTestUtils.NAME_ONE); - assertEquals(QueryGroupTestUtils.NAME_ONE, request.getName()); + GetWorkloadGroupRequest request = new GetWorkloadGroupRequest(WorkloadGroupTestUtils.NAME_ONE); + assertEquals(WorkloadGroupTestUtils.NAME_ONE, request.getName()); BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - GetQueryGroupRequest otherRequest = new GetQueryGroupRequest(streamInput); + GetWorkloadGroupRequest otherRequest = new GetWorkloadGroupRequest(streamInput); assertEquals(request.getName(), otherRequest.getName()); } /** - * Test case to verify the serialization and deserialization of GetQueryGroupRequest when name is null. + * Test case to verify the serialization and deserialization of GetWorkloadGroupRequest when name is null. */ public void testSerializationWithNull() throws IOException { - GetQueryGroupRequest request = new GetQueryGroupRequest((String) null); + GetWorkloadGroupRequest request = new GetWorkloadGroupRequest((String) null); assertNull(request.getName()); BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - GetQueryGroupRequest otherRequest = new GetQueryGroupRequest(streamInput); + GetWorkloadGroupRequest otherRequest = new GetWorkloadGroupRequest(streamInput); assertEquals(request.getName(), otherRequest.getName()); } /** - * Test case the validation function of GetQueryGroupRequest + * Test case the validation function of GetWorkloadGroupRequest */ public void testValidation() { - GetQueryGroupRequest request = new GetQueryGroupRequest("a".repeat(51)); + GetWorkloadGroupRequest request = new GetWorkloadGroupRequest("a".repeat(51)); assertThrows(IllegalArgumentException.class, request::validate); } } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupResponseTests.java similarity index 51% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponseTests.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupResponseTests.java index 1a2ac282d86a4..dc0aeabc7a033 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponseTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupResponseTests.java @@ -8,14 +8,14 @@ package org.opensearch.plugin.wlm.action; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -24,74 +24,74 @@ import static org.mockito.Mockito.mock; -public class GetQueryGroupResponseTests extends OpenSearchTestCase { +public class GetWorkloadGroupResponseTests extends OpenSearchTestCase 
{ /** - * Test case to verify the serialization and deserialization of GetQueryGroupResponse. + * Test case to verify the serialization and deserialization of GetWorkloadGroupResponse. */ - public void testSerializationSingleQueryGroup() throws IOException { - List list = new ArrayList<>(); - list.add(QueryGroupTestUtils.queryGroupOne); - GetQueryGroupResponse response = new GetQueryGroupResponse(list, RestStatus.OK); - assertEquals(response.getQueryGroups(), list); + public void testSerializationSingleWorkloadGroup() throws IOException { + List list = new ArrayList<>(); + list.add(WorkloadGroupTestUtils.workloadGroupOne); + GetWorkloadGroupResponse response = new GetWorkloadGroupResponse(list, RestStatus.OK); + assertEquals(response.getWorkloadGroups(), list); BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(streamInput); + GetWorkloadGroupResponse otherResponse = new GetWorkloadGroupResponse(streamInput); assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); - QueryGroupTestUtils.assertEqualQueryGroups(response.getQueryGroups(), otherResponse.getQueryGroups(), false); + WorkloadGroupTestUtils.assertEqualWorkloadGroups(response.getWorkloadGroups(), otherResponse.getWorkloadGroups(), false); } /** - * Test case to verify the serialization and deserialization of GetQueryGroupResponse when the result contains multiple QueryGroups. + * Test case to verify the serialization and deserialization of GetWorkloadGroupResponse when the result contains multiple WorkloadGroups. */ - public void testSerializationMultipleQueryGroup() throws IOException { - GetQueryGroupResponse response = new GetQueryGroupResponse(QueryGroupTestUtils.queryGroupList(), RestStatus.OK); - assertEquals(response.getQueryGroups(), QueryGroupTestUtils.queryGroupList()); + public void testSerializationMultipleWorkloadGroup() throws IOException { + GetWorkloadGroupResponse response = new GetWorkloadGroupResponse(WorkloadGroupTestUtils.workloadGroupList(), RestStatus.OK); + assertEquals(response.getWorkloadGroups(), WorkloadGroupTestUtils.workloadGroupList()); BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(streamInput); + GetWorkloadGroupResponse otherResponse = new GetWorkloadGroupResponse(streamInput); assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); - assertEquals(2, otherResponse.getQueryGroups().size()); - QueryGroupTestUtils.assertEqualQueryGroups(response.getQueryGroups(), otherResponse.getQueryGroups(), false); + assertEquals(2, otherResponse.getWorkloadGroups().size()); + WorkloadGroupTestUtils.assertEqualWorkloadGroups(response.getWorkloadGroups(), otherResponse.getWorkloadGroups(), false); } /** - * Test case to verify the serialization and deserialization of GetQueryGroupResponse when the result is empty. + * Test case to verify the serialization and deserialization of GetWorkloadGroupResponse when the result is empty. 
*/ public void testSerializationNull() throws IOException { - List list = new ArrayList<>(); - GetQueryGroupResponse response = new GetQueryGroupResponse(list, RestStatus.OK); - assertEquals(response.getQueryGroups(), list); + List list = new ArrayList<>(); + GetWorkloadGroupResponse response = new GetWorkloadGroupResponse(list, RestStatus.OK); + assertEquals(response.getWorkloadGroups(), list); BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(streamInput); + GetWorkloadGroupResponse otherResponse = new GetWorkloadGroupResponse(streamInput); assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); - assertEquals(0, otherResponse.getQueryGroups().size()); + assertEquals(0, otherResponse.getWorkloadGroups().size()); } /** - * Test case to verify the toXContent of GetQueryGroupResponse. + * Test case to verify the toXContent of GetWorkloadGroupResponse. */ - public void testToXContentGetSingleQueryGroup() throws IOException { - List queryGroupList = new ArrayList<>(); - queryGroupList.add(QueryGroupTestUtils.queryGroupOne); + public void testToXContentGetSingleWorkloadGroup() throws IOException { + List workloadGroupList = new ArrayList<>(); + workloadGroupList.add(WorkloadGroupTestUtils.workloadGroupOne); XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); - GetQueryGroupResponse response = new GetQueryGroupResponse(queryGroupList, RestStatus.OK); + GetWorkloadGroupResponse response = new GetWorkloadGroupResponse(workloadGroupList, RestStatus.OK); String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); String expected = "{\n" - + " \"query_groups\" : [\n" + + " \"workload_groups\" : [\n" + " {\n" + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" - + " \"name\" : \"query_group_one\",\n" + + " \"name\" : \"workload_group_one\",\n" + " \"resiliency_mode\" : \"monitor\",\n" + " \"resource_limits\" : {\n" + " \"memory\" : 0.3\n" @@ -104,20 +104,20 @@ public void testToXContentGetSingleQueryGroup() throws IOException { } /** - * Test case to verify the toXContent of GetQueryGroupResponse when the result contains multiple QueryGroups. + * Test case to verify the toXContent of GetWorkloadGroupResponse when the result contains multiple WorkloadGroups. 
*/ - public void testToXContentGetMultipleQueryGroup() throws IOException { - List queryGroupList = new ArrayList<>(); - queryGroupList.add(QueryGroupTestUtils.queryGroupOne); - queryGroupList.add(QueryGroupTestUtils.queryGroupTwo); + public void testToXContentGetMultipleWorkloadGroup() throws IOException { + List workloadGroupList = new ArrayList<>(); + workloadGroupList.add(WorkloadGroupTestUtils.workloadGroupOne); + workloadGroupList.add(WorkloadGroupTestUtils.workloadGroupTwo); XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); - GetQueryGroupResponse response = new GetQueryGroupResponse(queryGroupList, RestStatus.OK); + GetWorkloadGroupResponse response = new GetWorkloadGroupResponse(workloadGroupList, RestStatus.OK); String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); String expected = "{\n" - + " \"query_groups\" : [\n" + + " \"workload_groups\" : [\n" + " {\n" + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" - + " \"name\" : \"query_group_one\",\n" + + " \"name\" : \"workload_group_one\",\n" + " \"resiliency_mode\" : \"monitor\",\n" + " \"resource_limits\" : {\n" + " \"memory\" : 0.3\n" @@ -126,7 +126,7 @@ public void testToXContentGetMultipleQueryGroup() throws IOException { + " },\n" + " {\n" + " \"_id\" : \"G5iIqHy4g7eK1qIAAAAIH53=1\",\n" - + " \"name\" : \"query_group_two\",\n" + + " \"name\" : \"workload_group_two\",\n" + " \"resiliency_mode\" : \"monitor\",\n" + " \"resource_limits\" : {\n" + " \"memory\" : 0.6\n" @@ -139,13 +139,13 @@ public void testToXContentGetMultipleQueryGroup() throws IOException { } /** - * Test case to verify toXContent of GetQueryGroupResponse when the result contains zero QueryGroup. + * Test case to verify toXContent of GetWorkloadGroupResponse when the result contains zero WorkloadGroup. */ - public void testToXContentGetZeroQueryGroup() throws IOException { + public void testToXContentGetZeroWorkloadGroup() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); - GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(new ArrayList<>(), RestStatus.OK); + GetWorkloadGroupResponse otherResponse = new GetWorkloadGroupResponse(new ArrayList<>(), RestStatus.OK); String actual = otherResponse.toXContent(builder, mock(ToXContent.Params.class)).toString(); - String expected = "{\n" + " \"query_groups\" : [ ]\n" + "}"; + String expected = "{\n" + " \"workload_groups\" : [ ]\n" + "}"; assertEquals(expected, actual); } } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/QueryGroupActionTestUtils.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/QueryGroupActionTestUtils.java deleted file mode 100644 index 08d128ca7ed59..0000000000000 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/QueryGroupActionTestUtils.java +++ /dev/null @@ -1,17 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.wlm.action; - -import org.opensearch.wlm.MutableQueryGroupFragment; - -public class QueryGroupActionTestUtils { - public static UpdateQueryGroupRequest updateQueryGroupRequest(String name, MutableQueryGroupFragment mutableQueryGroupFragment) { - return new UpdateQueryGroupRequest(name, mutableQueryGroupFragment); - } -} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteWorkloadGroupActionTests.java similarity index 75% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteWorkloadGroupActionTests.java index 39d263bfdb150..7ffa33aa8a80a 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteWorkloadGroupActionTests.java @@ -14,7 +14,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.core.action.ActionListener; -import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -23,26 +23,26 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; -public class TransportDeleteQueryGroupActionTests extends OpenSearchTestCase { +public class TransportDeleteWorkloadGroupActionTests extends OpenSearchTestCase { ClusterService clusterService = mock(ClusterService.class); TransportService transportService = mock(TransportService.class); ActionFilters actionFilters = mock(ActionFilters.class); ThreadPool threadPool = mock(ThreadPool.class); IndexNameExpressionResolver indexNameExpressionResolver = mock(IndexNameExpressionResolver.class); - QueryGroupPersistenceService queryGroupPersistenceService = mock(QueryGroupPersistenceService.class); + WorkloadGroupPersistenceService workloadGroupPersistenceService = mock(WorkloadGroupPersistenceService.class); - TransportDeleteQueryGroupAction action = new TransportDeleteQueryGroupAction( + TransportDeleteWorkloadGroupAction action = new TransportDeleteWorkloadGroupAction( clusterService, transportService, actionFilters, threadPool, indexNameExpressionResolver, - queryGroupPersistenceService + workloadGroupPersistenceService ); /** - * Test case to validate the construction for TransportDeleteQueryGroupAction + * Test case to validate the construction for TransportDeleteWorkloadGroupAction */ public void testConstruction() { assertNotNull(action); @@ -50,14 +50,14 @@ public void testConstruction() { } /** - * Test case to validate the clusterManagerOperation function in TransportDeleteQueryGroupAction + * Test case to validate the clusterManagerOperation function in TransportDeleteWorkloadGroupAction */ public void testClusterManagerOperation() throws Exception { - DeleteQueryGroupRequest request = new DeleteQueryGroupRequest("testGroup"); + DeleteWorkloadGroupRequest request = new DeleteWorkloadGroupRequest("testGroup"); @SuppressWarnings("unchecked") ActionListener 
listener = mock(ActionListener.class); ClusterState clusterState = mock(ClusterState.class); action.clusterManagerOperation(request, clusterState, listener); - verify(queryGroupPersistenceService).deleteInClusterStateMetadata(eq(request), eq(listener)); + verify(workloadGroupPersistenceService).deleteInClusterStateMetadata(eq(request), eq(listener)); } } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetWorkloadGroupActionTests.java similarity index 54% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupActionTests.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetWorkloadGroupActionTests.java index 755b11a5f4b89..cf12d9f6408cf 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupActionTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetWorkloadGroupActionTests.java @@ -17,21 +17,21 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterState; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_NONE_EXISTED; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.clusterState; import static org.mockito.Mockito.mock; -public class TransportGetQueryGroupActionTests extends OpenSearchTestCase { +public class TransportGetWorkloadGroupActionTests extends OpenSearchTestCase { /** * Test case for ClusterManagerOperation function */ @SuppressWarnings("unchecked") public void testClusterManagerOperation() throws Exception { - GetQueryGroupRequest getQueryGroupRequest1 = new GetQueryGroupRequest(NAME_NONE_EXISTED); - GetQueryGroupRequest getQueryGroupRequest2 = new GetQueryGroupRequest(NAME_ONE); - TransportGetQueryGroupAction transportGetQueryGroupAction = new TransportGetQueryGroupAction( + GetWorkloadGroupRequest getWorkloadGroupRequest1 = new GetWorkloadGroupRequest(NAME_NONE_EXISTED); + GetWorkloadGroupRequest getWorkloadGroupRequest2 = new GetWorkloadGroupRequest(NAME_ONE); + TransportGetWorkloadGroupAction transportGetWorkloadGroupAction = new TransportGetWorkloadGroupAction( mock(ClusterService.class), mock(TransportService.class), mock(ActionFilters.class), @@ -40,8 +40,12 @@ public void testClusterManagerOperation() throws Exception { ); assertThrows( ResourceNotFoundException.class, - () -> transportGetQueryGroupAction.clusterManagerOperation(getQueryGroupRequest1, clusterState(), mock(ActionListener.class)) + () -> transportGetWorkloadGroupAction.clusterManagerOperation( + getWorkloadGroupRequest1, + clusterState(), + mock(ActionListener.class) + ) ); - transportGetQueryGroupAction.clusterManagerOperation(getQueryGroupRequest2, clusterState(), mock(ActionListener.class)); + transportGetWorkloadGroupAction.clusterManagerOperation(getWorkloadGroupRequest2, clusterState(), mock(ActionListener.class)); } } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequestTests.java 
b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequestTests.java similarity index 54% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequestTests.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequestTests.java index b99f079e81984..e8d883da5c6eb 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequestTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequestTests.java @@ -11,59 +11,62 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.wlm.MutableQueryGroupFragment; -import org.opensearch.wlm.MutableQueryGroupFragment.ResiliencyMode; +import org.opensearch.wlm.MutableWorkloadGroupFragment; +import org.opensearch.wlm.MutableWorkloadGroupFragment.ResiliencyMode; import org.opensearch.wlm.ResourceType; import java.io.IOException; import java.util.HashMap; import java.util.Map; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupOne; -public class UpdateQueryGroupRequestTests extends OpenSearchTestCase { +public class UpdateWorkloadGroupRequestTests extends OpenSearchTestCase { /** - * Test case to verify the serialization and deserialization of UpdateQueryGroupRequest. + * Test case to verify the serialization and deserialization of UpdateWorkloadGroupRequest. */ public void testSerialization() throws IOException { - UpdateQueryGroupRequest request = new UpdateQueryGroupRequest(NAME_ONE, queryGroupOne.getMutableQueryGroupFragment()); + UpdateWorkloadGroupRequest request = new UpdateWorkloadGroupRequest(NAME_ONE, workloadGroupOne.getMutableWorkloadGroupFragment()); BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - UpdateQueryGroupRequest otherRequest = new UpdateQueryGroupRequest(streamInput); + UpdateWorkloadGroupRequest otherRequest = new UpdateWorkloadGroupRequest(streamInput); assertEquals(request.getName(), otherRequest.getName()); - assertEquals(request.getmMutableQueryGroupFragment(), otherRequest.getmMutableQueryGroupFragment()); + assertEquals(request.getmMutableWorkloadGroupFragment(), otherRequest.getmMutableWorkloadGroupFragment()); } /** - * Test case to verify the serialization and deserialization of UpdateQueryGroupRequest with only name field. + * Test case to verify the serialization and deserialization of UpdateWorkloadGroupRequest with only name field. 
*/ public void testSerializationOnlyName() throws IOException { - UpdateQueryGroupRequest request = new UpdateQueryGroupRequest(NAME_ONE, new MutableQueryGroupFragment(null, new HashMap<>())); + UpdateWorkloadGroupRequest request = new UpdateWorkloadGroupRequest( + NAME_ONE, + new MutableWorkloadGroupFragment(null, new HashMap<>()) + ); BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - UpdateQueryGroupRequest otherRequest = new UpdateQueryGroupRequest(streamInput); + UpdateWorkloadGroupRequest otherRequest = new UpdateWorkloadGroupRequest(streamInput); assertEquals(request.getName(), otherRequest.getName()); - assertEquals(request.getmMutableQueryGroupFragment(), otherRequest.getmMutableQueryGroupFragment()); + assertEquals(request.getmMutableWorkloadGroupFragment(), otherRequest.getmMutableWorkloadGroupFragment()); } /** - * Test case to verify the serialization and deserialization of UpdateQueryGroupRequest with only resourceLimits field. + * Test case to verify the serialization and deserialization of UpdateWorkloadGroupRequest with only resourceLimits field. */ public void testSerializationOnlyResourceLimit() throws IOException { - UpdateQueryGroupRequest request = new UpdateQueryGroupRequest( + UpdateWorkloadGroupRequest request = new UpdateWorkloadGroupRequest( NAME_ONE, - new MutableQueryGroupFragment(null, Map.of(ResourceType.MEMORY, 0.4)) + new MutableWorkloadGroupFragment(null, Map.of(ResourceType.MEMORY, 0.4)) ); BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - UpdateQueryGroupRequest otherRequest = new UpdateQueryGroupRequest(streamInput); + UpdateWorkloadGroupRequest otherRequest = new UpdateWorkloadGroupRequest(streamInput); assertEquals(request.getName(), otherRequest.getName()); - assertEquals(request.getmMutableQueryGroupFragment(), otherRequest.getmMutableQueryGroupFragment()); + assertEquals(request.getmMutableWorkloadGroupFragment(), otherRequest.getmMutableWorkloadGroupFragment()); } /** @@ -72,9 +75,9 @@ public void testSerializationOnlyResourceLimit() throws IOException { public void testInvalidResourceLimitList() { assertThrows( IllegalArgumentException.class, - () -> new UpdateQueryGroupRequest( + () -> new UpdateWorkloadGroupRequest( NAME_ONE, - new MutableQueryGroupFragment( + new MutableWorkloadGroupFragment( ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.3, ResourceType.fromName("random"), 0.4) ) @@ -88,9 +91,9 @@ public void testInvalidResourceLimitList() { public void testInvalidEnforcement() { assertThrows( IllegalArgumentException.class, - () -> new UpdateQueryGroupRequest( + () -> new UpdateWorkloadGroupRequest( NAME_ONE, - new MutableQueryGroupFragment(ResiliencyMode.fromName("random"), Map.of(ResourceType.fromName("memory"), 0.3)) + new MutableWorkloadGroupFragment(ResiliencyMode.fromName("random"), Map.of(ResourceType.fromName("memory"), 0.3)) ) ); } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupResponseTests.java similarity index 59% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupResponseTests.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupResponseTests.java index a7ab4c6a682ef..97b9b9029373f 100644 --- 
a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupResponseTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupResponseTests.java @@ -8,54 +8,54 @@ package org.opensearch.plugin.wlm.action; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupOne; import static org.mockito.Mockito.mock; -public class UpdateQueryGroupResponseTests extends OpenSearchTestCase { +public class UpdateWorkloadGroupResponseTests extends OpenSearchTestCase { /** - * Test case to verify the serialization and deserialization of UpdateQueryGroupResponse. + * Test case to verify the serialization and deserialization of UpdateWorkloadGroupResponse. */ public void testSerialization() throws IOException { - UpdateQueryGroupResponse response = new UpdateQueryGroupResponse(queryGroupOne, RestStatus.OK); + UpdateWorkloadGroupResponse response = new UpdateWorkloadGroupResponse(workloadGroupOne, RestStatus.OK); BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - UpdateQueryGroupResponse otherResponse = new UpdateQueryGroupResponse(streamInput); + UpdateWorkloadGroupResponse otherResponse = new UpdateWorkloadGroupResponse(streamInput); assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); - QueryGroup responseGroup = response.getQueryGroup(); - QueryGroup otherResponseGroup = otherResponse.getQueryGroup(); - List list1 = new ArrayList<>(); - List list2 = new ArrayList<>(); + WorkloadGroup responseGroup = response.getWorkloadGroup(); + WorkloadGroup otherResponseGroup = otherResponse.getWorkloadGroup(); + List list1 = new ArrayList<>(); + List list2 = new ArrayList<>(); list1.add(responseGroup); list2.add(otherResponseGroup); - QueryGroupTestUtils.assertEqualQueryGroups(list1, list2, false); + WorkloadGroupTestUtils.assertEqualWorkloadGroups(list1, list2, false); } /** - * Test case to verify the toXContent method of UpdateQueryGroupResponse. + * Test case to verify the toXContent method of UpdateWorkloadGroupResponse. 
*/ - public void testToXContentUpdateSingleQueryGroup() throws IOException { + public void testToXContentUpdateSingleWorkloadGroup() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); - UpdateQueryGroupResponse otherResponse = new UpdateQueryGroupResponse(queryGroupOne, RestStatus.OK); + UpdateWorkloadGroupResponse otherResponse = new UpdateWorkloadGroupResponse(workloadGroupOne, RestStatus.OK); String actual = otherResponse.toXContent(builder, mock(ToXContent.Params.class)).toString(); String expected = "{\n" + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" - + " \"name\" : \"query_group_one\",\n" + + " \"name\" : \"workload_group_one\",\n" + " \"resiliency_mode\" : \"monitor\",\n" + " \"resource_limits\" : {\n" + " \"memory\" : 0.3\n" diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/WorkloadGroupActionTestUtils.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/WorkloadGroupActionTestUtils.java new file mode 100644 index 0000000000000..ddbfe13545c89 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/WorkloadGroupActionTestUtils.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.wlm.MutableWorkloadGroupFragment; + +public class WorkloadGroupActionTestUtils { + public static UpdateWorkloadGroupRequest updateWorkloadGroupRequest( + String name, + MutableWorkloadGroupFragment mutableWorkloadGroupFragment + ) { + return new UpdateWorkloadGroupRequest(name, mutableWorkloadGroupFragment); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupActionTests.java similarity index 69% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupActionTests.java index 959c9dccef2d0..8ce5c869f4481 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupActionTests.java @@ -11,8 +11,8 @@ import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.unit.TimeValue; -import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; -import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupAction; +import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupRequest; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestHandler; import org.opensearch.rest.RestRequest; @@ -25,7 +25,7 @@ import org.mockito.ArgumentCaptor; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_ONE; import static org.opensearch.rest.RestRequest.Method.DELETE; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -35,27 +35,27 @@ import 
static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; -public class RestDeleteQueryGroupActionTests extends OpenSearchTestCase { +public class RestDeleteWorkloadGroupActionTests extends OpenSearchTestCase { /** - * Test case to validate the construction for RestDeleteQueryGroupAction + * Test case to validate the construction for RestDeleteWorkloadGroupAction */ public void testConstruction() { - RestDeleteQueryGroupAction action = new RestDeleteQueryGroupAction(); + RestDeleteWorkloadGroupAction action = new RestDeleteWorkloadGroupAction(); assertNotNull(action); - assertEquals("delete_query_group", action.getName()); + assertEquals("delete_workload_group", action.getName()); List routes = action.routes(); assertEquals(1, routes.size()); RestHandler.Route route = routes.get(0); assertEquals(DELETE, route.getMethod()); - assertEquals("_wlm/query_group/{name}", route.getPath()); + assertEquals("_wlm/workload_group/{name}", route.getPath()); } /** - * Test case to validate the prepareRequest logic for RestDeleteQueryGroupAction + * Test case to validate the prepareRequest logic for RestDeleteWorkloadGroupAction */ @SuppressWarnings("unchecked") public void testPrepareRequest() throws Exception { - RestDeleteQueryGroupAction restDeleteQueryGroupAction = new RestDeleteQueryGroupAction(); + RestDeleteWorkloadGroupAction restDeleteWorkloadGroupAction = new RestDeleteWorkloadGroupAction(); NodeClient nodeClient = mock(NodeClient.class); RestRequest realRequest = new FakeRestRequest(); realRequest.params().put("name", NAME_ONE); @@ -65,20 +65,20 @@ public void testPrepareRequest() throws Exception { doReturn(TimeValue.timeValueSeconds(30)).when(spyRequest).paramAsTime(eq("cluster_manager_timeout"), any(TimeValue.class)); doReturn(TimeValue.timeValueSeconds(60)).when(spyRequest).paramAsTime(eq("timeout"), any(TimeValue.class)); - CheckedConsumer consumer = restDeleteQueryGroupAction.prepareRequest(spyRequest, nodeClient); + CheckedConsumer consumer = restDeleteWorkloadGroupAction.prepareRequest(spyRequest, nodeClient); assertNotNull(consumer); - ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(DeleteQueryGroupRequest.class); + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(DeleteWorkloadGroupRequest.class); ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(RestToXContentListener.class); - doNothing().when(nodeClient).execute(eq(DeleteQueryGroupAction.INSTANCE), requestCaptor.capture(), listenerCaptor.capture()); + doNothing().when(nodeClient).execute(eq(DeleteWorkloadGroupAction.INSTANCE), requestCaptor.capture(), listenerCaptor.capture()); consumer.accept(mock(RestChannel.class)); - DeleteQueryGroupRequest capturedRequest = requestCaptor.getValue(); + DeleteWorkloadGroupRequest capturedRequest = requestCaptor.getValue(); assertEquals(NAME_ONE, capturedRequest.getName()); assertEquals(TimeValue.timeValueSeconds(30), capturedRequest.clusterManagerNodeTimeout()); assertEquals(TimeValue.timeValueSeconds(60), capturedRequest.timeout()); verify(nodeClient).execute( - eq(DeleteQueryGroupAction.INSTANCE), - any(DeleteQueryGroupRequest.class), + eq(DeleteWorkloadGroupAction.INSTANCE), + any(DeleteWorkloadGroupRequest.class), any(RestToXContentListener.class) ); } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java deleted file mode 100644 index 67e47be1a55ce..0000000000000 --- 
a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java +++ /dev/null @@ -1,515 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.wlm.service; - -import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; -import org.opensearch.cluster.AckedClusterStateUpdateTask; -import org.opensearch.cluster.ClusterName; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.ClusterStateUpdateTask; -import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.metadata.QueryGroup; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.core.action.ActionListener; -import org.opensearch.plugin.wlm.QueryGroupTestUtils; -import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; -import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; -import org.opensearch.plugin.wlm.action.UpdateQueryGroupRequest; -import org.opensearch.plugin.wlm.action.UpdateQueryGroupResponse; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.wlm.MutableQueryGroupFragment; -import org.opensearch.wlm.MutableQueryGroupFragment.ResiliencyMode; -import org.opensearch.wlm.ResourceType; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -import org.mockito.ArgumentCaptor; - -import static org.opensearch.cluster.metadata.QueryGroup.builder; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_TWO; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_ONE; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_TWO; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterSettings; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterSettingsSet; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterState; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.preparePersistenceServiceSetup; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupList; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupPersistenceService; -import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupTwo; -import static org.opensearch.plugin.wlm.action.QueryGroupActionTestUtils.updateQueryGroupRequest; -import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.QUERY_GROUP_COUNT_SETTING_NAME; -import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.SOURCE; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.anyString; -import static 
org.mockito.Mockito.argThat; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -public class QueryGroupPersistenceServiceTests extends OpenSearchTestCase { - - /** - * Test case to validate the creation logic of a QueryGroup - */ - public void testCreateQueryGroup() { - Tuple setup = preparePersistenceServiceSetup(new HashMap<>()); - QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); - ClusterState clusterState = setup.v2(); - ClusterState newClusterState = queryGroupPersistenceService1.saveQueryGroupInClusterState(queryGroupOne, clusterState); - Map updatedGroupsMap = newClusterState.getMetadata().queryGroups(); - assertEquals(1, updatedGroupsMap.size()); - assertTrue(updatedGroupsMap.containsKey(_ID_ONE)); - List listOne = new ArrayList<>(); - List listTwo = new ArrayList<>(); - listOne.add(queryGroupOne); - listTwo.add(updatedGroupsMap.get(_ID_ONE)); - assertEqualQueryGroups(listOne, listTwo, false); - } - - /** - * Test case to validate the logic for adding a new QueryGroup to a cluster state that already contains - * an existing QueryGroup - */ - public void testCreateAnotherQueryGroup() { - Tuple setup = preparePersistenceServiceSetup(Map.of(_ID_ONE, queryGroupOne)); - QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); - ClusterState clusterState = setup.v2(); - ClusterState newClusterState = queryGroupPersistenceService1.saveQueryGroupInClusterState(queryGroupTwo, clusterState); - Map updatedGroups = newClusterState.getMetadata().queryGroups(); - assertEquals(2, updatedGroups.size()); - assertTrue(updatedGroups.containsKey(_ID_TWO)); - Collection values = updatedGroups.values(); - assertEqualQueryGroups(queryGroupList(), new ArrayList<>(values), false); - } - - /** - * Test case to ensure the error is thrown when we try to create another QueryGroup with duplicate name - */ - public void testCreateQueryGroupDuplicateName() { - Tuple setup = preparePersistenceServiceSetup(Map.of(_ID_ONE, queryGroupOne)); - QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); - ClusterState clusterState = setup.v2(); - QueryGroup toCreate = builder().name(NAME_ONE) - ._id("W5iIqHyhgi4K1qIAAAAIHw==") - .mutableQueryGroupFragment(new MutableQueryGroupFragment(ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.3))) - .updatedAt(1690934400000L) - .build(); - assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); - } - - /** - * Test case to ensure the error is thrown when we try to create another QueryGroup that will make - * the total resource limits go above 1 - */ - public void testCreateQueryGroupOverflowAllocation() { - Tuple setup = preparePersistenceServiceSetup(Map.of(_ID_TWO, queryGroupTwo)); - QueryGroup toCreate = builder().name(NAME_ONE) - ._id("W5iIqHyhgi4K1qIAAAAIHw==") - .mutableQueryGroupFragment(new MutableQueryGroupFragment(ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.41))) - .updatedAt(1690934400000L) - .build(); - - QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); - ClusterState clusterState = setup.v2(); - assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); - } - - /** - * Test case to ensure the error is thrown when we already have the max allowed number of QueryGroups, but - * we want to create another one - */ - 
public void testCreateQueryGroupOverflowCount() { - QueryGroup toCreate = builder().name(NAME_NONE_EXISTED) - ._id("W5iIqHyhgi4K1qIAAAAIHw==") - .mutableQueryGroupFragment(new MutableQueryGroupFragment(ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.5))) - .updatedAt(1690934400000L) - .build(); - Metadata metadata = Metadata.builder().queryGroups(Map.of(_ID_ONE, queryGroupOne, _ID_TWO, queryGroupTwo)).build(); - Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); - ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); - QueryGroupPersistenceService queryGroupPersistenceService1 = new QueryGroupPersistenceService( - clusterService, - settings, - clusterSettings - ); - assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); - } - - /** - * Tests the invalid value of {@code node.query_group.max_count} - */ - public void testInvalidMaxQueryGroupCount() { - Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); - ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); - QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( - clusterService, - settings, - clusterSettings - ); - assertThrows(IllegalArgumentException.class, () -> queryGroupPersistenceService.setMaxQueryGroupCount(-1)); - } - - /** - * Tests the valid value of {@code node.query_group.max_count} - */ - public void testValidMaxSandboxCountSetting() { - Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 100).build(); - ClusterService clusterService = new ClusterService(settings, clusterSettings(), mock(ThreadPool.class)); - QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( - clusterService, - settings, - clusterSettings() - ); - queryGroupPersistenceService.setMaxQueryGroupCount(50); - assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); - } - - /** - * Tests PersistInClusterStateMetadata function - */ - public void testPersistInClusterStateMetadata() { - ClusterService clusterService = mock(ClusterService.class); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); - QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( - clusterService, - QueryGroupTestUtils.settings(), - clusterSettings() - ); - queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); - verify(clusterService).submitStateUpdateTask(eq(SOURCE), any()); - } - - /** - * Tests PersistInClusterStateMetadata function with inner functions - */ - public void testPersistInClusterStateMetadataInner() { - ClusterService clusterService = mock(ClusterService.class); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); - QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( - clusterService, - QueryGroupTestUtils.settings(), - clusterSettings() - ); - ArgumentCaptor captor = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); - 
queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); - verify(clusterService, times(1)).submitStateUpdateTask(eq(SOURCE), captor.capture()); - ClusterStateUpdateTask capturedTask = captor.getValue(); - assertEquals(queryGroupPersistenceService.createQueryGroupThrottlingKey, capturedTask.getClusterManagerThrottlingKey()); - - doAnswer(invocation -> { - ClusterStateUpdateTask task = invocation.getArgument(1); - task.clusterStateProcessed(SOURCE, mock(ClusterState.class), mock(ClusterState.class)); - return null; - }).when(clusterService).submitStateUpdateTask(anyString(), any()); - queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); - verify(listener).onResponse(any(CreateQueryGroupResponse.class)); - } - - /** - * Tests PersistInClusterStateMetadata function with failure - */ - public void testPersistInClusterStateMetadataFailure() { - ClusterService clusterService = mock(ClusterService.class); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); - QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( - clusterService, - QueryGroupTestUtils.settings(), - clusterSettings() - ); - doAnswer(invocation -> { - ClusterStateUpdateTask task = invocation.getArgument(1); - Exception exception = new RuntimeException("Test Exception"); - task.onFailure(SOURCE, exception); - return null; - }).when(clusterService).submitStateUpdateTask(anyString(), any()); - queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); - verify(listener).onFailure(any(RuntimeException.class)); - } - - /** - * Tests getting a single QueryGroup - */ - public void testGetSingleQueryGroup() { - Collection groupsCollections = QueryGroupPersistenceService.getFromClusterStateMetadata(NAME_ONE, clusterState()); - List groups = new ArrayList<>(groupsCollections); - assertEquals(1, groups.size()); - QueryGroup queryGroup = groups.get(0); - List listOne = new ArrayList<>(); - List listTwo = new ArrayList<>(); - listOne.add(QueryGroupTestUtils.queryGroupOne); - listTwo.add(queryGroup); - QueryGroupTestUtils.assertEqualQueryGroups(listOne, listTwo, false); - } - - /** - * Tests getting all QueryGroups - */ - public void testGetAllQueryGroups() { - assertEquals(2, QueryGroupTestUtils.clusterState().metadata().queryGroups().size()); - Collection groupsCollections = QueryGroupPersistenceService.getFromClusterStateMetadata(null, clusterState()); - List res = new ArrayList<>(groupsCollections); - assertEquals(2, res.size()); - Set currentNAME = res.stream().map(QueryGroup::getName).collect(Collectors.toSet()); - assertTrue(currentNAME.contains(QueryGroupTestUtils.NAME_ONE)); - assertTrue(currentNAME.contains(QueryGroupTestUtils.NAME_TWO)); - QueryGroupTestUtils.assertEqualQueryGroups(QueryGroupTestUtils.queryGroupList(), res, false); - } - - /** - * Tests getting a QueryGroup with invalid name - */ - public void testGetNonExistedQueryGroups() { - Collection groupsCollections = QueryGroupPersistenceService.getFromClusterStateMetadata( - NAME_NONE_EXISTED, - clusterState() - ); - List groups = new ArrayList<>(groupsCollections); - assertEquals(0, groups.size()); - } - - /** - * Tests setting maxQueryGroupCount - */ - public void testMaxQueryGroupCount() { - assertThrows(IllegalArgumentException.class, () -> QueryGroupTestUtils.queryGroupPersistenceService().setMaxQueryGroupCount(-1)); - QueryGroupPersistenceService queryGroupPersistenceService = 
QueryGroupTestUtils.queryGroupPersistenceService(); - queryGroupPersistenceService.setMaxQueryGroupCount(50); - assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); - } - - /** - * Tests delete a single QueryGroup - */ - public void testDeleteSingleQueryGroup() { - ClusterState newClusterState = queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_TWO, clusterState()); - Map afterDeletionGroups = newClusterState.getMetadata().queryGroups(); - assertFalse(afterDeletionGroups.containsKey(_ID_TWO)); - assertEquals(1, afterDeletionGroups.size()); - List oldQueryGroups = new ArrayList<>(); - oldQueryGroups.add(queryGroupOne); - assertEqualQueryGroups(new ArrayList<>(afterDeletionGroups.values()), oldQueryGroups, false); - } - - /** - * Tests delete a QueryGroup with invalid name - */ - public void testDeleteNonExistedQueryGroup() { - assertThrows( - ResourceNotFoundException.class, - () -> queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_NONE_EXISTED, clusterState()) - ); - } - - /** - * Tests DeleteInClusterStateMetadata function - */ - @SuppressWarnings("unchecked") - public void testDeleteInClusterStateMetadata() throws Exception { - DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(NAME_ONE); - ClusterService clusterService = mock(ClusterService.class); - - ActionListener listener = mock(ActionListener.class); - QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( - clusterService, - QueryGroupTestUtils.settings(), - clusterSettings() - ); - doAnswer(invocation -> { - AckedClusterStateUpdateTask task = invocation.getArgument(1); - ClusterState initialState = clusterState(); - ClusterState newState = task.execute(initialState); - assertNotNull(newState); - assertEquals(queryGroupPersistenceService.deleteQueryGroupThrottlingKey, task.getClusterManagerThrottlingKey()); - task.onAllNodesAcked(null); - verify(listener).onResponse(argThat(response -> response.isAcknowledged())); - return null; - }).when(clusterService).submitStateUpdateTask(anyString(), any()); - queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); - verify(clusterService).submitStateUpdateTask(eq(SOURCE), any(AckedClusterStateUpdateTask.class)); - } - - /** - * Tests updating a QueryGroup with all fields - */ - public void testUpdateQueryGroupAllFields() { - QueryGroup updated = builder().name(NAME_ONE) - ._id(_ID_ONE) - .mutableQueryGroupFragment(new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.15))) - .updatedAt(1690934400000L) - .build(); - UpdateQueryGroupRequest updateQueryGroupRequest = updateQueryGroupRequest(NAME_ONE, updated.getMutableQueryGroupFragment()); - ClusterState newClusterState = queryGroupPersistenceService().updateQueryGroupInClusterState( - updateQueryGroupRequest, - clusterState() - ); - List updatedQueryGroups = new ArrayList<>(newClusterState.getMetadata().queryGroups().values()); - assertEquals(2, updatedQueryGroups.size()); - List expectedList = new ArrayList<>(); - expectedList.add(queryGroupTwo); - expectedList.add(updated); - assertEqualQueryGroups(expectedList, updatedQueryGroups, true); - } - - /** - * Tests updating a QueryGroup with only updated resourceLimits - */ - public void testUpdateQueryGroupResourceLimitsOnly() { - QueryGroup updated = builder().name(NAME_ONE) - ._id(_ID_ONE) - .mutableQueryGroupFragment(new MutableQueryGroupFragment(ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.15))) - .updatedAt(1690934400000L) - 
.build(); - UpdateQueryGroupRequest updateQueryGroupRequest = updateQueryGroupRequest(NAME_ONE, updated.getMutableQueryGroupFragment()); - ClusterState newClusterState = queryGroupPersistenceService().updateQueryGroupInClusterState( - updateQueryGroupRequest, - clusterState() - ); - List updatedQueryGroups = new ArrayList<>(newClusterState.getMetadata().queryGroups().values()); - assertEquals(2, updatedQueryGroups.size()); - Optional findUpdatedGroupOne = newClusterState.metadata() - .queryGroups() - .values() - .stream() - .filter(group -> group.getName().equals(NAME_ONE)) - .findFirst(); - Optional findUpdatedGroupTwo = newClusterState.metadata() - .queryGroups() - .values() - .stream() - .filter(group -> group.getName().equals(NAME_TWO)) - .findFirst(); - assertTrue(findUpdatedGroupOne.isPresent()); - assertTrue(findUpdatedGroupTwo.isPresent()); - List list1 = new ArrayList<>(); - list1.add(updated); - List list2 = new ArrayList<>(); - list2.add(findUpdatedGroupOne.get()); - assertEqualQueryGroups(list1, list2, true); - } - - /** - * Tests updating a QueryGroup with invalid name - */ - public void testUpdateQueryGroupNonExistedName() { - QueryGroupPersistenceService queryGroupPersistenceService = queryGroupPersistenceService(); - UpdateQueryGroupRequest updateQueryGroupRequest = updateQueryGroupRequest( - NAME_NONE_EXISTED, - new MutableQueryGroupFragment(ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.15)) - ); - assertThrows( - RuntimeException.class, - () -> queryGroupPersistenceService.updateQueryGroupInClusterState(updateQueryGroupRequest, clusterState()) - ); - List updatedQueryGroups = new ArrayList<>( - queryGroupPersistenceService.getClusterService().state().metadata().queryGroups().values() - ); - assertEquals(2, updatedQueryGroups.size()); - List expectedList = new ArrayList<>(); - expectedList.add(queryGroupTwo); - expectedList.add(queryGroupOne); - assertEqualQueryGroups(expectedList, updatedQueryGroups, true); - } - - /** - * Tests UpdateInClusterStateMetadata function - */ - public void testUpdateInClusterStateMetadata() { - ClusterService clusterService = mock(ClusterService.class); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); - QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( - clusterService, - QueryGroupTestUtils.settings(), - clusterSettings() - ); - queryGroupPersistenceService.updateInClusterStateMetadata(null, listener); - verify(clusterService).submitStateUpdateTask(eq(SOURCE), any()); - } - - /** - * Tests UpdateInClusterStateMetadata function with inner functions - */ - public void testUpdateInClusterStateMetadataInner() { - ClusterService clusterService = mock(ClusterService.class); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); - QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( - clusterService, - QueryGroupTestUtils.settings(), - clusterSettings() - ); - UpdateQueryGroupRequest updateQueryGroupRequest = updateQueryGroupRequest( - NAME_TWO, - new MutableQueryGroupFragment(ResiliencyMode.SOFT, new HashMap<>()) - ); - ArgumentCaptor captor = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); - queryGroupPersistenceService.updateInClusterStateMetadata(updateQueryGroupRequest, listener); - verify(clusterService, times(1)).submitStateUpdateTask(eq(SOURCE), captor.capture()); - ClusterStateUpdateTask capturedTask = captor.getValue(); - 
assertEquals(queryGroupPersistenceService.updateQueryGroupThrottlingKey, capturedTask.getClusterManagerThrottlingKey()); - - doAnswer(invocation -> { - ClusterStateUpdateTask task = invocation.getArgument(1); - task.clusterStateProcessed(SOURCE, clusterState(), clusterState()); - return null; - }).when(clusterService).submitStateUpdateTask(anyString(), any()); - queryGroupPersistenceService.updateInClusterStateMetadata(updateQueryGroupRequest, listener); - verify(listener).onResponse(any(UpdateQueryGroupResponse.class)); - } - - /** - * Tests UpdateInClusterStateMetadata function with failure - */ - public void testUpdateInClusterStateMetadataFailure() { - ClusterService clusterService = mock(ClusterService.class); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); - QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( - clusterService, - QueryGroupTestUtils.settings(), - clusterSettings() - ); - UpdateQueryGroupRequest updateQueryGroupRequest = updateQueryGroupRequest( - NAME_TWO, - new MutableQueryGroupFragment(ResiliencyMode.SOFT, new HashMap<>()) - ); - doAnswer(invocation -> { - ClusterStateUpdateTask task = invocation.getArgument(1); - Exception exception = new RuntimeException("Test Exception"); - task.onFailure(SOURCE, exception); - return null; - }).when(clusterService).submitStateUpdateTask(anyString(), any()); - queryGroupPersistenceService.updateInClusterStateMetadata(updateQueryGroupRequest, listener); - verify(listener).onFailure(any(RuntimeException.class)); - } -} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceServiceTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceServiceTests.java new file mode 100644 index 0000000000000..571103b32205d --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceServiceTests.java @@ -0,0 +1,533 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.service; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.WorkloadGroup; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; +import org.opensearch.plugin.wlm.action.CreateWorkloadGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupRequest; +import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupRequest; +import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupResponse; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.wlm.MutableWorkloadGroupFragment; +import org.opensearch.wlm.MutableWorkloadGroupFragment.ResiliencyMode; +import org.opensearch.wlm.ResourceType; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; + +import org.mockito.ArgumentCaptor; + +import static org.opensearch.cluster.metadata.WorkloadGroup.builder; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_NONE_EXISTED; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_TWO; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils._ID_ONE; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils._ID_TWO; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.assertEqualWorkloadGroups; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.clusterSettings; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.clusterSettingsSet; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.clusterState; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.preparePersistenceServiceSetup; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupList; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupOne; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupPersistenceService; +import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupTwo; +import static org.opensearch.plugin.wlm.action.WorkloadGroupActionTestUtils.updateWorkloadGroupRequest; +import static org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService.QUERY_GROUP_COUNT_SETTING_NAME; +import static org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService.SOURCE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class WorkloadGroupPersistenceServiceTests extends 
OpenSearchTestCase { + + /** + * Test case to validate the creation logic of a WorkloadGroup + */ + public void testCreateWorkloadGroup() { + Tuple setup = preparePersistenceServiceSetup(new HashMap<>()); + WorkloadGroupPersistenceService workloadGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + ClusterState newClusterState = workloadGroupPersistenceService1.saveWorkloadGroupInClusterState(workloadGroupOne, clusterState); + Map updatedGroupsMap = newClusterState.getMetadata().workloadGroups(); + assertEquals(1, updatedGroupsMap.size()); + assertTrue(updatedGroupsMap.containsKey(_ID_ONE)); + List listOne = new ArrayList<>(); + List listTwo = new ArrayList<>(); + listOne.add(workloadGroupOne); + listTwo.add(updatedGroupsMap.get(_ID_ONE)); + assertEqualWorkloadGroups(listOne, listTwo, false); + } + + /** + * Test case to validate the logic for adding a new WorkloadGroup to a cluster state that already contains + * an existing WorkloadGroup + */ + public void testCreateAnotherWorkloadGroup() { + Tuple setup = preparePersistenceServiceSetup(Map.of(_ID_ONE, workloadGroupOne)); + WorkloadGroupPersistenceService workloadGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + ClusterState newClusterState = workloadGroupPersistenceService1.saveWorkloadGroupInClusterState(workloadGroupTwo, clusterState); + Map updatedGroups = newClusterState.getMetadata().workloadGroups(); + assertEquals(2, updatedGroups.size()); + assertTrue(updatedGroups.containsKey(_ID_TWO)); + Collection values = updatedGroups.values(); + assertEqualWorkloadGroups(workloadGroupList(), new ArrayList<>(values), false); + } + + /** + * Test case to ensure the error is thrown when we try to create another WorkloadGroup with duplicate name + */ + public void testCreateWorkloadGroupDuplicateName() { + Tuple setup = preparePersistenceServiceSetup(Map.of(_ID_ONE, workloadGroupOne)); + WorkloadGroupPersistenceService workloadGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + WorkloadGroup toCreate = builder().name(NAME_ONE) + ._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mutableWorkloadGroupFragment(new MutableWorkloadGroupFragment(ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.3))) + .updatedAt(1690934400000L) + .build(); + assertThrows( + RuntimeException.class, + () -> workloadGroupPersistenceService1.saveWorkloadGroupInClusterState(toCreate, clusterState) + ); + } + + /** + * Test case to ensure the error is thrown when we try to create another WorkloadGroup that will make + * the total resource limits go above 1 + */ + public void testCreateWorkloadGroupOverflowAllocation() { + Tuple setup = preparePersistenceServiceSetup(Map.of(_ID_TWO, workloadGroupTwo)); + WorkloadGroup toCreate = builder().name(NAME_ONE) + ._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mutableWorkloadGroupFragment(new MutableWorkloadGroupFragment(ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.41))) + .updatedAt(1690934400000L) + .build(); + + WorkloadGroupPersistenceService workloadGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + assertThrows( + RuntimeException.class, + () -> workloadGroupPersistenceService1.saveWorkloadGroupInClusterState(toCreate, clusterState) + ); + } + + /** + * Test case to ensure the error is thrown when we already have the max allowed number of WorkloadGroups, but + * we want to create another one + */ + public void testCreateWorkloadGroupOverflowCount() { + WorkloadGroup toCreate = builder().name(NAME_NONE_EXISTED) + 
._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mutableWorkloadGroupFragment(new MutableWorkloadGroupFragment(ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.5))) + .updatedAt(1690934400000L) + .build(); + Metadata metadata = Metadata.builder().workloadGroups(Map.of(_ID_ONE, workloadGroupOne, _ID_TWO, workloadGroupTwo)).build(); + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + WorkloadGroupPersistenceService workloadGroupPersistenceService1 = new WorkloadGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + assertThrows( + RuntimeException.class, + () -> workloadGroupPersistenceService1.saveWorkloadGroupInClusterState(toCreate, clusterState) + ); + } + + /** + * Tests the invalid value of {@code node.workload_group.max_count} + */ + public void testInvalidMaxWorkloadGroupCount() { + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); + WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + assertThrows(IllegalArgumentException.class, () -> workloadGroupPersistenceService.setMaxWorkloadGroupCount(-1)); + } + + /** + * Tests the valid value of {@code node.workload_group.max_count} + */ + public void testValidMaxSandboxCountSetting() { + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 100).build(); + ClusterService clusterService = new ClusterService(settings, clusterSettings(), mock(ThreadPool.class)); + WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( + clusterService, + settings, + clusterSettings() + ); + workloadGroupPersistenceService.setMaxWorkloadGroupCount(50); + assertEquals(50, workloadGroupPersistenceService.getMaxWorkloadGroupCount()); + } + + /** + * Tests PersistInClusterStateMetadata function + */ + public void testPersistInClusterStateMetadata() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( + clusterService, + WorkloadGroupTestUtils.settings(), + clusterSettings() + ); + workloadGroupPersistenceService.persistInClusterStateMetadata(workloadGroupOne, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any()); + } + + /** + * Tests PersistInClusterStateMetadata function with inner functions + */ + public void testPersistInClusterStateMetadataInner() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( + clusterService, + WorkloadGroupTestUtils.settings(), + clusterSettings() + ); + ArgumentCaptor captor = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + 
workloadGroupPersistenceService.persistInClusterStateMetadata(workloadGroupOne, listener); + verify(clusterService, times(1)).submitStateUpdateTask(eq(SOURCE), captor.capture()); + ClusterStateUpdateTask capturedTask = captor.getValue(); + assertEquals(workloadGroupPersistenceService.createWorkloadGroupThrottlingKey, capturedTask.getClusterManagerThrottlingKey()); + + doAnswer(invocation -> { + ClusterStateUpdateTask task = invocation.getArgument(1); + task.clusterStateProcessed(SOURCE, mock(ClusterState.class), mock(ClusterState.class)); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + workloadGroupPersistenceService.persistInClusterStateMetadata(workloadGroupOne, listener); + verify(listener).onResponse(any(CreateWorkloadGroupResponse.class)); + } + + /** + * Tests PersistInClusterStateMetadata function with failure + */ + public void testPersistInClusterStateMetadataFailure() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( + clusterService, + WorkloadGroupTestUtils.settings(), + clusterSettings() + ); + doAnswer(invocation -> { + ClusterStateUpdateTask task = invocation.getArgument(1); + Exception exception = new RuntimeException("Test Exception"); + task.onFailure(SOURCE, exception); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + workloadGroupPersistenceService.persistInClusterStateMetadata(workloadGroupOne, listener); + verify(listener).onFailure(any(RuntimeException.class)); + } + + /** + * Tests getting a single WorkloadGroup + */ + public void testGetSingleWorkloadGroup() { + Collection groupsCollections = WorkloadGroupPersistenceService.getFromClusterStateMetadata(NAME_ONE, clusterState()); + List groups = new ArrayList<>(groupsCollections); + assertEquals(1, groups.size()); + WorkloadGroup workloadGroup = groups.get(0); + List listOne = new ArrayList<>(); + List listTwo = new ArrayList<>(); + listOne.add(WorkloadGroupTestUtils.workloadGroupOne); + listTwo.add(workloadGroup); + WorkloadGroupTestUtils.assertEqualWorkloadGroups(listOne, listTwo, false); + } + + /** + * Tests getting all WorkloadGroups + */ + public void testGetAllWorkloadGroups() { + assertEquals(2, WorkloadGroupTestUtils.clusterState().metadata().workloadGroups().size()); + Collection groupsCollections = WorkloadGroupPersistenceService.getFromClusterStateMetadata(null, clusterState()); + List res = new ArrayList<>(groupsCollections); + assertEquals(2, res.size()); + Set currentNAME = res.stream().map(WorkloadGroup::getName).collect(Collectors.toSet()); + assertTrue(currentNAME.contains(WorkloadGroupTestUtils.NAME_ONE)); + assertTrue(currentNAME.contains(WorkloadGroupTestUtils.NAME_TWO)); + WorkloadGroupTestUtils.assertEqualWorkloadGroups(WorkloadGroupTestUtils.workloadGroupList(), res, false); + } + + /** + * Tests getting a WorkloadGroup with invalid name + */ + public void testGetNonExistedWorkloadGroups() { + Collection groupsCollections = WorkloadGroupPersistenceService.getFromClusterStateMetadata( + NAME_NONE_EXISTED, + clusterState() + ); + List groups = new ArrayList<>(groupsCollections); + assertEquals(0, groups.size()); + } + + /** + * Tests setting maxWorkloadGroupCount + */ + public void testMaxWorkloadGroupCount() { + assertThrows( + IllegalArgumentException.class, + () -> 
WorkloadGroupTestUtils.workloadGroupPersistenceService().setMaxWorkloadGroupCount(-1) + ); + WorkloadGroupPersistenceService workloadGroupPersistenceService = WorkloadGroupTestUtils.workloadGroupPersistenceService(); + workloadGroupPersistenceService.setMaxWorkloadGroupCount(50); + assertEquals(50, workloadGroupPersistenceService.getMaxWorkloadGroupCount()); + } + + /** + * Tests delete a single WorkloadGroup + */ + public void testDeleteSingleWorkloadGroup() { + ClusterState newClusterState = workloadGroupPersistenceService().deleteWorkloadGroupInClusterState(NAME_TWO, clusterState()); + Map afterDeletionGroups = newClusterState.getMetadata().workloadGroups(); + assertFalse(afterDeletionGroups.containsKey(_ID_TWO)); + assertEquals(1, afterDeletionGroups.size()); + List oldWorkloadGroups = new ArrayList<>(); + oldWorkloadGroups.add(workloadGroupOne); + assertEqualWorkloadGroups(new ArrayList<>(afterDeletionGroups.values()), oldWorkloadGroups, false); + } + + /** + * Tests delete a WorkloadGroup with invalid name + */ + public void testDeleteNonExistedWorkloadGroup() { + assertThrows( + ResourceNotFoundException.class, + () -> workloadGroupPersistenceService().deleteWorkloadGroupInClusterState(NAME_NONE_EXISTED, clusterState()) + ); + } + + /** + * Tests DeleteInClusterStateMetadata function + */ + @SuppressWarnings("unchecked") + public void testDeleteInClusterStateMetadata() throws Exception { + DeleteWorkloadGroupRequest request = new DeleteWorkloadGroupRequest(NAME_ONE); + ClusterService clusterService = mock(ClusterService.class); + + ActionListener listener = mock(ActionListener.class); + WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( + clusterService, + WorkloadGroupTestUtils.settings(), + clusterSettings() + ); + doAnswer(invocation -> { + AckedClusterStateUpdateTask task = invocation.getArgument(1); + ClusterState initialState = clusterState(); + ClusterState newState = task.execute(initialState); + assertNotNull(newState); + assertEquals(workloadGroupPersistenceService.deleteWorkloadGroupThrottlingKey, task.getClusterManagerThrottlingKey()); + task.onAllNodesAcked(null); + verify(listener).onResponse(argThat(response -> response.isAcknowledged())); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + workloadGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any(AckedClusterStateUpdateTask.class)); + } + + /** + * Tests updating a WorkloadGroup with all fields + */ + public void testUpdateWorkloadGroupAllFields() { + WorkloadGroup updated = builder().name(NAME_ONE) + ._id(_ID_ONE) + .mutableWorkloadGroupFragment(new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.15))) + .updatedAt(1690934400000L) + .build(); + UpdateWorkloadGroupRequest updateWorkloadGroupRequest = updateWorkloadGroupRequest( + NAME_ONE, + updated.getMutableWorkloadGroupFragment() + ); + ClusterState newClusterState = workloadGroupPersistenceService().updateWorkloadGroupInClusterState( + updateWorkloadGroupRequest, + clusterState() + ); + List updatedWorkloadGroups = new ArrayList<>(newClusterState.getMetadata().workloadGroups().values()); + assertEquals(2, updatedWorkloadGroups.size()); + List expectedList = new ArrayList<>(); + expectedList.add(workloadGroupTwo); + expectedList.add(updated); + assertEqualWorkloadGroups(expectedList, updatedWorkloadGroups, true); + } + + /** + * Tests updating a WorkloadGroup 
with only updated resourceLimits + */ + public void testUpdateWorkloadGroupResourceLimitsOnly() { + WorkloadGroup updated = builder().name(NAME_ONE) + ._id(_ID_ONE) + .mutableWorkloadGroupFragment(new MutableWorkloadGroupFragment(ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.15))) + .updatedAt(1690934400000L) + .build(); + UpdateWorkloadGroupRequest updateWorkloadGroupRequest = updateWorkloadGroupRequest( + NAME_ONE, + updated.getMutableWorkloadGroupFragment() + ); + ClusterState newClusterState = workloadGroupPersistenceService().updateWorkloadGroupInClusterState( + updateWorkloadGroupRequest, + clusterState() + ); + List updatedWorkloadGroups = new ArrayList<>(newClusterState.getMetadata().workloadGroups().values()); + assertEquals(2, updatedWorkloadGroups.size()); + Optional findUpdatedGroupOne = newClusterState.metadata() + .workloadGroups() + .values() + .stream() + .filter(group -> group.getName().equals(NAME_ONE)) + .findFirst(); + Optional findUpdatedGroupTwo = newClusterState.metadata() + .workloadGroups() + .values() + .stream() + .filter(group -> group.getName().equals(NAME_TWO)) + .findFirst(); + assertTrue(findUpdatedGroupOne.isPresent()); + assertTrue(findUpdatedGroupTwo.isPresent()); + List list1 = new ArrayList<>(); + list1.add(updated); + List list2 = new ArrayList<>(); + list2.add(findUpdatedGroupOne.get()); + assertEqualWorkloadGroups(list1, list2, true); + } + + /** + * Tests updating a WorkloadGroup with invalid name + */ + public void testUpdateWorkloadGroupNonExistedName() { + WorkloadGroupPersistenceService workloadGroupPersistenceService = workloadGroupPersistenceService(); + UpdateWorkloadGroupRequest updateWorkloadGroupRequest = updateWorkloadGroupRequest( + NAME_NONE_EXISTED, + new MutableWorkloadGroupFragment(ResiliencyMode.MONITOR, Map.of(ResourceType.MEMORY, 0.15)) + ); + assertThrows( + RuntimeException.class, + () -> workloadGroupPersistenceService.updateWorkloadGroupInClusterState(updateWorkloadGroupRequest, clusterState()) + ); + List updatedWorkloadGroups = new ArrayList<>( + workloadGroupPersistenceService.getClusterService().state().metadata().workloadGroups().values() + ); + assertEquals(2, updatedWorkloadGroups.size()); + List expectedList = new ArrayList<>(); + expectedList.add(workloadGroupTwo); + expectedList.add(workloadGroupOne); + assertEqualWorkloadGroups(expectedList, updatedWorkloadGroups, true); + } + + /** + * Tests UpdateInClusterStateMetadata function + */ + public void testUpdateInClusterStateMetadata() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( + clusterService, + WorkloadGroupTestUtils.settings(), + clusterSettings() + ); + workloadGroupPersistenceService.updateInClusterStateMetadata(null, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any()); + } + + /** + * Tests UpdateInClusterStateMetadata function with inner functions + */ + public void testUpdateInClusterStateMetadataInner() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( + clusterService, + WorkloadGroupTestUtils.settings(), + clusterSettings() + ); + UpdateWorkloadGroupRequest updateWorkloadGroupRequest = 
updateWorkloadGroupRequest( + NAME_TWO, + new MutableWorkloadGroupFragment(ResiliencyMode.SOFT, new HashMap<>()) + ); + ArgumentCaptor captor = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + workloadGroupPersistenceService.updateInClusterStateMetadata(updateWorkloadGroupRequest, listener); + verify(clusterService, times(1)).submitStateUpdateTask(eq(SOURCE), captor.capture()); + ClusterStateUpdateTask capturedTask = captor.getValue(); + assertEquals(workloadGroupPersistenceService.updateWorkloadGroupThrottlingKey, capturedTask.getClusterManagerThrottlingKey()); + + doAnswer(invocation -> { + ClusterStateUpdateTask task = invocation.getArgument(1); + task.clusterStateProcessed(SOURCE, clusterState(), clusterState()); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + workloadGroupPersistenceService.updateInClusterStateMetadata(updateWorkloadGroupRequest, listener); + verify(listener).onResponse(any(UpdateWorkloadGroupResponse.class)); + } + + /** + * Tests UpdateInClusterStateMetadata function with failure + */ + public void testUpdateInClusterStateMetadataFailure() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( + clusterService, + WorkloadGroupTestUtils.settings(), + clusterSettings() + ); + UpdateWorkloadGroupRequest updateWorkloadGroupRequest = updateWorkloadGroupRequest( + NAME_TWO, + new MutableWorkloadGroupFragment(ResiliencyMode.SOFT, new HashMap<>()) + ); + doAnswer(invocation -> { + ClusterStateUpdateTask task = invocation.getArgument(1); + Exception exception = new RuntimeException("Test Exception"); + task.onFailure(SOURCE, exception); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + workloadGroupPersistenceService.updateInClusterStateMetadata(updateWorkloadGroupRequest, listener); + verify(listener).onFailure(any(RuntimeException.class)); + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_workload_group_context.json similarity index 61% rename from plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json rename to plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_workload_group_context.json index bb4620c01f2d6..17ac552eaee36 100644 --- a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_workload_group_context.json @@ -1,10 +1,10 @@ { - "create_query_group_context": { + "create_workload_group_context": { "stability": "experimental", "url": { "paths": [ { - "path": "/_wlm/query_group", + "path": "/_wlm/workload_group", "methods": ["PUT", "POST"], "parts": {} } @@ -12,7 +12,7 @@ }, "params":{}, "body":{ - "description":"The QueryGroup schema" + "description":"The WorkloadGroup schema" } } } diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_workload_group_context.json similarity index 67% rename from 
plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json
rename to plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_workload_group_context.json
index 16930427fc2fe..8f52d6cf523c2 100644
--- a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json
+++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_workload_group_context.json
@@ -1,17 +1,17 @@
 {
-  "delete_query_group_context": {
+  "delete_workload_group_context": {
     "stability": "experimental",
     "url": {
       "paths": [
         {
-          "path": "/_wlm/query_group/{name}",
+          "path": "/_wlm/workload_group/{name}",
           "methods": [ "DELETE" ],
           "parts": {
             "name": {
               "type": "string",
-              "description": "QueryGroup name"
+              "description": "WorkloadGroup name"
             }
           }
         }
diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_workload_group_context.json
similarity index 65%
rename from plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_query_group_context.json
rename to plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_workload_group_context.json
index e0d552be616b2..f11e0839b4fb6 100644
--- a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_query_group_context.json
+++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_workload_group_context.json
@@ -1,20 +1,20 @@
 {
-  "get_query_group_context": {
+  "get_workload_group_context": {
     "stability": "experimental",
     "url": {
       "paths": [
         {
-          "path": "/_wlm/query_group",
+          "path": "/_wlm/workload_group",
           "methods": ["GET"],
           "parts": {}
         },
         {
-          "path": "/_wlm/query_group/{name}",
+          "path": "/_wlm/workload_group/{name}",
           "methods": ["GET"],
           "parts": {
             "name": {
               "type": "string",
-              "description": "QueryGroup name"
+              "description": "WorkloadGroup name"
             }
           }
         }
diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/update_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/update_workload_group_context.json
similarity index 59%
rename from plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/update_query_group_context.json
rename to plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/update_workload_group_context.json
index fbfa2dde292ee..7b96b2bcbf44f 100644
--- a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/update_query_group_context.json
+++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/update_workload_group_context.json
@@ -1,15 +1,15 @@
 {
-  "update_query_group_context": {
+  "update_workload_group_context": {
     "stability": "experimental",
     "url": {
       "paths": [
         {
-          "path": "/_wlm/query_group/{name}",
+          "path": "/_wlm/workload_group/{name}",
           "methods": ["PUT", "POST"],
           "parts": {
             "name": {
               "type": "string",
-              "description": "QueryGroup name"
+              "description": "WorkloadGroup name"
            }
          }
        }
@@ -17,7 +17,7 @@
     },
     "params":{},
     "body":{
-      "description":"The updated QueryGroup schema"
+      "description":"The updated WorkloadGroup schema"
     }
   }
 }
diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_workload_group.yml
similarity index 76%
rename from
plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml rename to plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_workload_group.yml index 40ec665351094..178639638890d 100644 --- a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_workload_group.yml @@ -4,7 +4,7 @@ reason: "QueryGroup WorkloadManagement feature was added in 2.17" - do: - create_query_group_context: + create_workload_group_context: body: { "name": "analytics", @@ -21,16 +21,16 @@ - match: { resource_limits.memory: 0.2 } - do: - get_query_group_context: + get_workload_group_context: name: "analytics" - - match: { query_groups.0.name: "analytics" } - - match: { query_groups.0.resiliency_mode: "monitor" } - - match: { query_groups.0.resource_limits.cpu: 0.4 } - - match: { query_groups.0.resource_limits.memory: 0.2 } + - match: { workload_groups.0.name: "analytics" } + - match: { workload_groups.0.resiliency_mode: "monitor" } + - match: { workload_groups.0.resource_limits.cpu: 0.4 } + - match: { workload_groups.0.resource_limits.memory: 0.2 } - do: - update_query_group_context: + update_workload_group_context: name: "analytics" body: { @@ -48,7 +48,7 @@ - do: catch: /resource_not_found_exception/ - update_query_group_context: + update_workload_group_context: name: "analytics5" body: { @@ -61,7 +61,7 @@ - do: catch: /illegal_argument_exception/ - update_query_group_context: + update_workload_group_context: name: "analytics" body: { @@ -73,7 +73,7 @@ - do: catch: /illegal_argument_exception/ - create_query_group_context: + create_workload_group_context: body: { "name": "analytics", @@ -86,7 +86,7 @@ - do: catch: /illegal_argument_exception/ - create_query_group_context: + create_workload_group_context: body: { "name": "analytics2", @@ -99,7 +99,7 @@ - do: catch: /illegal_argument_exception/ - create_query_group_context: + create_workload_group_context: body: { "name": "analytics2", @@ -112,7 +112,7 @@ - do: catch: /illegal_argument_exception/ - create_query_group_context: + create_workload_group_context: body: { "name": "", @@ -124,7 +124,7 @@ } - do: - create_query_group_context: + create_workload_group_context: body: { "name": "analytics2", @@ -141,16 +141,16 @@ - match: { resource_limits.memory: 0.25 } - do: - get_query_group_context: + get_workload_group_context: name: "analytics2" - - match: { query_groups.0.name: "analytics2" } - - match: { query_groups.0.resiliency_mode: "monitor" } - - match: { query_groups.0.resource_limits.cpu: 0.35 } - - match: { query_groups.0.resource_limits.memory: 0.25 } + - match: { workload_groups.0.name: "analytics2" } + - match: { workload_groups.0.resiliency_mode: "monitor" } + - match: { workload_groups.0.resource_limits.cpu: 0.35 } + - match: { workload_groups.0.resource_limits.memory: 0.25 } - do: - delete_query_group_context: + delete_workload_group_context: name: "analytics2" - match: { acknowledged: true } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java index d200b9177353a..558ce82071c21 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -39,7 +39,7 @@ import 
org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupTask; import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.Before; @@ -416,7 +416,7 @@ protected void doExecute(Task task, TestRequest request, ActionListener { try { CancellableTask cancellableTask = (CancellableTask) task; - ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + ((WorkloadGroupTask) task).setWorkloadGroupId(threadPool.getThreadContext()); long startTime = System.nanoTime(); // Doing a busy-wait until task cancellation or timeout. diff --git a/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java index 6b68a83da94e2..957f9f6b10ab6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java @@ -24,7 +24,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; @@ -60,7 +60,7 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.threadpool.ThreadPool.Names.SAME; -import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; +import static org.opensearch.wlm.WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER; import static org.hamcrest.Matchers.instanceOf; public class WorkloadManagementIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { @@ -115,80 +115,80 @@ public final void cleanupNodeSettings() { public void testHighCPUInEnforcedMode() throws InterruptedException { Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); - QueryGroup queryGroup = new QueryGroup( + WorkloadGroup workloadGroup = new WorkloadGroup( "name", - new MutableQueryGroupFragment( - MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + new MutableWorkloadGroupFragment( + MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01) ) ); - updateQueryGroupInClusterState(PUT, queryGroup); - Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + updateWorkloadGroupInClusterState(PUT, workloadGroup); + Exception caughtException = executeWorkloadGroupTask(CPU, workloadGroup.get_id()); assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); - updateQueryGroupInClusterState(DELETE, queryGroup); + updateWorkloadGroupInClusterState(DELETE, workloadGroup); } public void testHighCPUInMonitorMode() throws InterruptedException { - QueryGroup queryGroup = new QueryGroup( + WorkloadGroup workloadGroup = new WorkloadGroup( 
"name", - new MutableQueryGroupFragment( - MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + new MutableWorkloadGroupFragment( + MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01) ) ); - updateQueryGroupInClusterState(PUT, queryGroup); - Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + updateWorkloadGroupInClusterState(PUT, workloadGroup); + Exception caughtException = executeWorkloadGroupTask(CPU, workloadGroup.get_id()); assertNull(caughtException); - updateQueryGroupInClusterState(DELETE, queryGroup); + updateWorkloadGroupInClusterState(DELETE, workloadGroup); } public void testHighMemoryInEnforcedMode() throws InterruptedException { Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); - QueryGroup queryGroup = new QueryGroup( + WorkloadGroup workloadGroup = new WorkloadGroup( "name", - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) + new MutableWorkloadGroupFragment(MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) ); - updateQueryGroupInClusterState(PUT, queryGroup); - Exception caughtException = executeQueryGroupTask(MEMORY, queryGroup.get_id()); + updateWorkloadGroupInClusterState(PUT, workloadGroup); + Exception caughtException = executeWorkloadGroupTask(MEMORY, workloadGroup.get_id()); assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); - updateQueryGroupInClusterState(DELETE, queryGroup); + updateWorkloadGroupInClusterState(DELETE, workloadGroup); } public void testHighMemoryInMonitorMode() throws InterruptedException { - QueryGroup queryGroup = new QueryGroup( + WorkloadGroup workloadGroup = new WorkloadGroup( "name", - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) + new MutableWorkloadGroupFragment(MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) ); - updateQueryGroupInClusterState(PUT, queryGroup); - Exception caughtException = executeQueryGroupTask(MEMORY, queryGroup.get_id()); + updateWorkloadGroupInClusterState(PUT, workloadGroup); + Exception caughtException = executeWorkloadGroupTask(MEMORY, workloadGroup.get_id()); assertNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); - updateQueryGroupInClusterState(DELETE, queryGroup); + updateWorkloadGroupInClusterState(DELETE, workloadGroup); } public void testNoCancellation() throws InterruptedException { - QueryGroup queryGroup = new QueryGroup( + WorkloadGroup workloadGroup = new WorkloadGroup( "name", - new MutableQueryGroupFragment( - MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + new MutableWorkloadGroupFragment( + MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.8, ResourceType.MEMORY, 0.8) ) ); - updateQueryGroupInClusterState(PUT, queryGroup); - Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + updateWorkloadGroupInClusterState(PUT, workloadGroup); + Exception caughtException = executeWorkloadGroupTask(CPU, workloadGroup.get_id()); assertNull(caughtException); - updateQueryGroupInClusterState(DELETE, queryGroup); + 
updateWorkloadGroupInClusterState(DELETE, workloadGroup); } - public Exception executeQueryGroupTask(String resourceType, String queryGroupId) throws InterruptedException { + public Exception executeWorkloadGroupTask(String resourceType, String workloadGroupId) throws InterruptedException { ExceptionCatchingListener listener = new ExceptionCatchingListener(); client().execute( - TestQueryGroupTaskTransportAction.ACTION, - new TestQueryGroupTaskRequest( + TestWorkloadGroupTaskTransportAction.ACTION, + new TestWorkloadGroupTaskRequest( resourceType, - queryGroupId, + workloadGroupId, (TaskFactory) (id, type, action, description, parentTaskId, headers) -> new SearchTask( id, type, @@ -204,26 +204,26 @@ public Exception executeQueryGroupTask(String resourceType, String queryGroupId) return listener.getException(); } - public void updateQueryGroupInClusterState(String method, QueryGroup queryGroup) throws InterruptedException { + public void updateWorkloadGroupInClusterState(String method, WorkloadGroup workloadGroup) throws InterruptedException { ExceptionCatchingListener listener = new ExceptionCatchingListener(); - client().execute(TestClusterUpdateTransportAction.ACTION, new TestClusterUpdateRequest(queryGroup, method), listener); + client().execute(TestClusterUpdateTransportAction.ACTION, new TestClusterUpdateRequest(workloadGroup, method), listener); assertTrue(listener.getLatch().await(TIMEOUT.getSeconds(), TimeUnit.SECONDS)); assertEquals(0, listener.getLatch().getCount()); } public static class TestClusterUpdateRequest extends ClusterManagerNodeRequest { final private String method; - final private QueryGroup queryGroup; + final private WorkloadGroup workloadGroup; - public TestClusterUpdateRequest(QueryGroup queryGroup, String method) { + public TestClusterUpdateRequest(WorkloadGroup workloadGroup, String method) { this.method = method; - this.queryGroup = queryGroup; + this.workloadGroup = workloadGroup; } public TestClusterUpdateRequest(StreamInput in) throws IOException { super(in); this.method = in.readString(); - this.queryGroup = new QueryGroup(in); + this.workloadGroup = new WorkloadGroup(in); } @Override @@ -235,11 +235,11 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(method); - queryGroup.writeTo(out); + workloadGroup.writeTo(out); } - public QueryGroup getQueryGroup() { - return queryGroup; + public WorkloadGroup getWorkloadGroup() { + return workloadGroup; } public String getMethod() { @@ -293,13 +293,13 @@ protected void clusterManagerOperation( clusterService.submitStateUpdateTask("query-group-persistence-service", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - Map currentGroups = currentState.metadata().queryGroups(); - QueryGroup queryGroup = request.getQueryGroup(); - String id = queryGroup.get_id(); + Map currentGroups = currentState.metadata().workloadGroups(); + WorkloadGroup workloadGroup = request.getWorkloadGroup(); + String id = workloadGroup.get_id(); String method = request.getMethod(); Metadata metadata; if (method.equals(PUT)) { // create - metadata = Metadata.builder(currentState.metadata()).put(queryGroup).build(); + metadata = Metadata.builder(currentState.metadata()).put(workloadGroup).build(); } else { // delete metadata = Metadata.builder(currentState.metadata()).remove(currentGroups.get(id)).build(); } @@ -319,21 +319,21 @@ public void clusterStateProcessed(String source, ClusterState oldState, 
ClusterS } } - public static class TestQueryGroupTaskRequest extends ActionRequest { + public static class TestWorkloadGroupTaskRequest extends ActionRequest { private final String type; - private final String queryGroupId; + private final String workloadGroupId; private TaskFactory taskFactory; - public TestQueryGroupTaskRequest(String type, String queryGroupId, TaskFactory taskFactory) { + public TestWorkloadGroupTaskRequest(String type, String workloadGroupId, TaskFactory taskFactory) { this.type = type; - this.queryGroupId = queryGroupId; + this.workloadGroupId = workloadGroupId; this.taskFactory = taskFactory; } - public TestQueryGroupTaskRequest(StreamInput in) throws IOException { + public TestWorkloadGroupTaskRequest(StreamInput in) throws IOException { super(in); this.type = in.readString(); - this.queryGroupId = in.readString(); + this.workloadGroupId = in.readString(); } @Override @@ -350,36 +350,39 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(type); - out.writeString(queryGroupId); + out.writeString(workloadGroupId); } public String getType() { return type; } - public String getQueryGroupId() { - return queryGroupId; + public String getWorkloadGroupId() { + return workloadGroupId; } } - public static class TestQueryGroupTaskTransportAction extends HandledTransportAction { - public static final ActionType ACTION = new ActionType<>("internal::test_query_group_task_action", TestResponse::new); + public static class TestWorkloadGroupTaskTransportAction extends HandledTransportAction { + public static final ActionType ACTION = new ActionType<>( + "internal::test_workload_group_task_action", + TestResponse::new + ); private final ThreadPool threadPool; @Inject - public TestQueryGroupTaskTransportAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters) { - super(ACTION.name(), transportService, actionFilters, TestQueryGroupTaskRequest::new); + public TestWorkloadGroupTaskTransportAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters) { + super(ACTION.name(), transportService, actionFilters, TestWorkloadGroupTaskRequest::new); this.threadPool = threadPool; } @Override - protected void doExecute(Task task, TestQueryGroupTaskRequest request, ActionListener listener) { - threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, request.getQueryGroupId()); + protected void doExecute(Task task, TestWorkloadGroupTaskRequest request, ActionListener listener) { + threadPool.getThreadContext().putHeader(WORKLOAD_GROUP_ID_HEADER, request.getWorkloadGroupId()); threadPool.executor(ThreadPool.Names.SEARCH).execute(() -> { try { CancellableTask cancellableTask = (CancellableTask) task; - ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); - assertEquals(request.getQueryGroupId(), ((QueryGroupTask) task).getQueryGroupId()); + ((WorkloadGroupTask) task).setWorkloadGroupId(threadPool.getThreadContext()); + assertEquals(request.getWorkloadGroupId(), ((WorkloadGroupTask) task).getWorkloadGroupId()); long startTime = System.nanoTime(); while (System.nanoTime() - startTime < TIMEOUT.getNanos()) { doWork(request); @@ -398,7 +401,7 @@ protected void doExecute(Task task, TestQueryGroupTaskRequest request, ActionLis }); } - private void doWork(TestQueryGroupTaskRequest request) throws InterruptedException { + private void doWork(TestWorkloadGroupTaskRequest request) throws 
InterruptedException { switch (request.getType()) { case "CPU": long i = 0, j = 1, k = 1, iterations = 1000; @@ -422,13 +425,13 @@ public static class TestClusterUpdatePlugin extends Plugin implements ActionPlug public List> getActions() { return Arrays.asList( new ActionHandler<>(TestClusterUpdateTransportAction.ACTION, TestClusterUpdateTransportAction.class), - new ActionHandler<>(TestQueryGroupTaskTransportAction.ACTION, TestQueryGroupTaskTransportAction.class) + new ActionHandler<>(TestWorkloadGroupTaskTransportAction.ACTION, TestWorkloadGroupTaskTransportAction.class) ); } @Override public List> getClientActions() { - return Arrays.asList(TestClusterUpdateTransportAction.ACTION, TestQueryGroupTaskTransportAction.ACTION); + return Arrays.asList(TestClusterUpdateTransportAction.ACTION, TestWorkloadGroupTaskTransportAction.ACTION); } } } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index fa504466e0a4a..5c7d9ad8e4f24 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -498,7 +498,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.client.node.NodeClient; import org.opensearch.usage.UsageService; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.ArrayList; import java.util.Collections; @@ -585,7 +585,7 @@ public ActionModule( actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()), Stream.of( new RestHeaderDefinition(Task.X_OPAQUE_ID, false), - new RestHeaderDefinition(QueryGroupTask.QUERY_GROUP_ID_HEADER, false) + new RestHeaderDefinition(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER, false) ) ).collect(Collectors.toSet()); UnaryOperator restWrapper = null; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/TransportWlmStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/TransportWlmStatsAction.java index 9c2fb3f1689ec..9a05c4184d4a2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/TransportWlmStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/TransportWlmStatsAction.java @@ -16,7 +16,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; -import org.opensearch.wlm.QueryGroupService; +import org.opensearch.wlm.WorkloadGroupService; import org.opensearch.wlm.stats.WlmStats; import java.io.IOException; @@ -29,14 +29,14 @@ */ public class TransportWlmStatsAction extends TransportNodesAction { - final QueryGroupService queryGroupService; + final WorkloadGroupService workloadGroupService; @Inject public TransportWlmStatsAction( ThreadPool threadPool, ClusterService clusterService, TransportService transportService, - QueryGroupService queryGroupService, + WorkloadGroupService workloadGroupService, ActionFilters actionFilters ) { super( @@ -50,7 +50,7 @@ public TransportWlmStatsAction( ThreadPool.Names.MANAGEMENT, WlmStats.class ); - this.queryGroupService = queryGroupService; + this.workloadGroupService = workloadGroupService; } @Override @@ -73,7 +73,7 @@ protected WlmStats nodeOperation(WlmStatsRequest wlmStatsRequest) { assert transportService.getLocalNode() != null; return new WlmStats( transportService.getLocalNode(), - queryGroupService.nodeStats(wlmStatsRequest.getQueryGroupIds(), wlmStatsRequest.isBreach()) + 
workloadGroupService.nodeStats(wlmStatsRequest.getWorkloadGroupIds(), wlmStatsRequest.isBreach()) ); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsRequest.java index bf4f79faff478..5f42591678e2b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsRequest.java @@ -23,40 +23,40 @@ @ExperimentalApi public class WlmStatsRequest extends BaseNodesRequest { - private final Set queryGroupIds; + private final Set workloadGroupIds; private final Boolean breach; public WlmStatsRequest(StreamInput in) throws IOException { super(in); - this.queryGroupIds = new HashSet<>(Set.of(in.readStringArray())); + this.workloadGroupIds = new HashSet<>(Set.of(in.readStringArray())); this.breach = in.readOptionalBoolean(); } /** - * Get QueryGroup stats from nodes based on the nodes ids specified. If none are passed, stats + * Get WorkloadGroup stats from nodes based on the nodes ids specified. If none are passed, stats * for all nodes will be returned. */ - public WlmStatsRequest(String[] nodesIds, Set queryGroupIds, Boolean breach) { + public WlmStatsRequest(String[] nodesIds, Set workloadGroupIds, Boolean breach) { super(false, nodesIds); - this.queryGroupIds = queryGroupIds; + this.workloadGroupIds = workloadGroupIds; this.breach = breach; } public WlmStatsRequest() { super(false, (String[]) null); - queryGroupIds = new HashSet<>(); + workloadGroupIds = new HashSet<>(); this.breach = false; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeStringArray(queryGroupIds.toArray(new String[0])); + out.writeStringArray(workloadGroupIds.toArray(new String[0])); out.writeOptionalBoolean(breach); } - public Set getQueryGroupIds() { - return queryGroupIds; + public Set getWorkloadGroupIds() { + return workloadGroupIds; } public Boolean isBreach() { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponse.java index 2ce1b09a61fc6..a663229220ea3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponse.java @@ -17,8 +17,8 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.wlm.stats.QueryGroupStats; import org.opensearch.wlm.stats.WlmStats; +import org.opensearch.wlm.stats.WorkloadGroupStats; import java.io.IOException; import java.util.List; @@ -51,8 +51,8 @@ protected void writeNodesTo(StreamOutput out, List nodes) throws IOExc public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { for (WlmStats wlmStats : getNodes()) { builder.startObject(wlmStats.getNode().getId()); - QueryGroupStats queryGroupStats = wlmStats.getQueryGroupStats(); - queryGroupStats.toXContent(builder, params); + WorkloadGroupStats workloadGroupStats = wlmStats.getWorkloadGroupStats(); + workloadGroupStats.toXContent(builder, params); builder.endObject(); } return builder; diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index 
ed2943db94420..b92d334598b63 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -38,7 +38,7 @@ import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.tasks.SearchBackpressureTask; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.Map; import java.util.function.Supplier; @@ -50,7 +50,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class SearchShardTask extends QueryGroupTask implements SearchBackpressureTask { +public class SearchShardTask extends WorkloadGroupTask implements SearchBackpressureTask { // generating metadata in a lazy way since source can be quite big private final MemoizedSupplier metadataSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index 2a1a961e7607b..346b17bf11633 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -36,7 +36,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.SearchBackpressureTask; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.Map; import java.util.function.Supplier; @@ -49,7 +49,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class SearchTask extends QueryGroupTask implements SearchBackpressureTask { +public class SearchTask extends WorkloadGroupTask implements SearchBackpressureTask { // generating description in a lazy way since source can be quite big private final Supplier descriptionSupplier; private SearchProgressListener progressListener = SearchProgressListener.NOOP; diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 1696cb365fcd6..1da080e5bd302 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -102,7 +102,7 @@ import org.opensearch.transport.client.Client; import org.opensearch.transport.client.OriginSettingClient; import org.opensearch.transport.client.node.NodeClient; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.ArrayList; import java.util.Arrays; @@ -447,8 +447,8 @@ private void executeRequest( // At this point either the QUERY_GROUP_ID header will be present in ThreadContext either via ActionFilter // or HTTP header (HTTP header will be deprecated once ActionFilter is implemented) - if (task instanceof QueryGroupTask) { - ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + if (task instanceof WorkloadGroupTask) { + ((WorkloadGroupTask) task).setWorkloadGroupId(threadPool.getThreadContext()); } PipelinedRequest searchRequest; diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java index 01bf5754a42a1..b0f98a4c1703b 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java +++ 
b/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java @@ -41,7 +41,7 @@ import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupTask; /** * Perform the search scroll @@ -75,8 +75,8 @@ public TransportSearchScrollAction( protected void doExecute(Task task, SearchScrollRequest request, ActionListener listener) { try { - if (task instanceof QueryGroupTask) { - ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + if (task instanceof WorkloadGroupTask) { + ((WorkloadGroupTask) task).setWorkloadGroupId(threadPool.getThreadContext()); } ParsedScrollId scrollId = TransportSearchHelper.parseScrollId(request.scrollId()); diff --git a/server/src/main/java/org/opensearch/autotagging/Rule.java b/server/src/main/java/org/opensearch/autotagging/Rule.java index 0f4adb5e462f5..10e8a503b8dc6 100644 --- a/server/src/main/java/org/opensearch/autotagging/Rule.java +++ b/server/src/main/java/org/opensearch/autotagging/Rule.java @@ -32,7 +32,7 @@ * "_id": "fwehf8302582mglfio349==", * "description": "Assign Query Group for Index Logs123" * "index_pattern": ["logs123"], - * "query_group": "dev_query_group_id", + * "workload_group": "dev_workload_group_id", * "updated_at": "01-10-2025T21:23:21.456Z" * } * @opensearch.experimental diff --git a/server/src/main/java/org/opensearch/autotagging/RuleValidator.java b/server/src/main/java/org/opensearch/autotagging/RuleValidator.java index 625d7ba94d282..9614761042081 100644 --- a/server/src/main/java/org/opensearch/autotagging/RuleValidator.java +++ b/server/src/main/java/org/opensearch/autotagging/RuleValidator.java @@ -17,7 +17,7 @@ import java.util.Objects; import java.util.Set; -import static org.opensearch.cluster.metadata.QueryGroup.isValid; +import static org.opensearch.cluster.metadata.WorkloadGroup.isValid; /** * This is the validator for rule. 
It ensures that the rule has a valid description, feature value, diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index 939d9ec6b9ae8..6466726836f0a 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -48,10 +48,10 @@ import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.metadata.MetadataMappingService; import org.opensearch.cluster.metadata.MetadataUpdateSettingsService; -import org.opensearch.cluster.metadata.QueryGroupMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.ViewMetadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; +import org.opensearch.cluster.metadata.WorkloadGroupMetadata; import org.opensearch.cluster.routing.DelayedAllocationService; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; @@ -221,7 +221,7 @@ public static List getNamedWriteables() { DecommissionAttributeMetadata::readDiffFrom ); - registerMetadataCustom(entries, QueryGroupMetadata.TYPE, QueryGroupMetadata::new, QueryGroupMetadata::readDiffFrom); + registerMetadataCustom(entries, WorkloadGroupMetadata.TYPE, WorkloadGroupMetadata::new, WorkloadGroupMetadata::readDiffFrom); // Task Status (not Diffable) entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new)); return entries; @@ -330,8 +330,8 @@ public static List getNamedXWriteables() { entries.add( new NamedXContentRegistry.Entry( Metadata.Custom.class, - new ParseField(QueryGroupMetadata.TYPE), - QueryGroupMetadata::fromXContent + new ParseField(WorkloadGroupMetadata.TYPE), + WorkloadGroupMetadata::fromXContent ) ); return entries; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 600f408cc963b..687b7fa1edef2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -853,9 +853,9 @@ public Map views() { return Optional.ofNullable((ViewMetadata) this.custom(ViewMetadata.TYPE)).map(ViewMetadata::views).orElse(Collections.emptyMap()); } - public Map queryGroups() { - return Optional.ofNullable((QueryGroupMetadata) this.custom(QueryGroupMetadata.TYPE)) - .map(QueryGroupMetadata::queryGroups) + public Map workloadGroups() { + return Optional.ofNullable((WorkloadGroupMetadata) this.custom(WorkloadGroupMetadata.TYPE)) + .map(WorkloadGroupMetadata::workloadGroups) .orElse(Collections.emptyMap()); } @@ -1385,29 +1385,29 @@ public Builder removeDataStream(String name) { return this; } - public Builder queryGroups(final Map queryGroups) { - this.customs.put(QueryGroupMetadata.TYPE, new QueryGroupMetadata(queryGroups)); + public Builder workloadGroups(final Map workloadGroups) { + this.customs.put(WorkloadGroupMetadata.TYPE, new WorkloadGroupMetadata(workloadGroups)); return this; } - public Builder put(final QueryGroup queryGroup) { - Objects.requireNonNull(queryGroup, "queryGroup should not be null"); - Map existing = new HashMap<>(getQueryGroups()); - existing.put(queryGroup.get_id(), queryGroup); - return queryGroups(existing); + public Builder put(final WorkloadGroup workloadGroup) { + Objects.requireNonNull(workloadGroup, 
"workloadGroup should not be null"); + Map existing = new HashMap<>(getWorkloadGroups()); + existing.put(workloadGroup.get_id(), workloadGroup); + return workloadGroups(existing); } - public Builder remove(final QueryGroup queryGroup) { - Objects.requireNonNull(queryGroup, "queryGroup should not be null"); - Map existing = new HashMap<>(getQueryGroups()); - existing.remove(queryGroup.get_id()); - return queryGroups(existing); + public Builder remove(final WorkloadGroup workloadGroup) { + Objects.requireNonNull(workloadGroup, "workloadGroup should not be null"); + Map existing = new HashMap<>(getWorkloadGroups()); + existing.remove(workloadGroup.get_id()); + return workloadGroups(existing); } - private Map getQueryGroups() { - return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE)) - .map(o -> (QueryGroupMetadata) o) - .map(QueryGroupMetadata::queryGroups) + private Map getWorkloadGroups() { + return Optional.ofNullable(this.customs.get(WorkloadGroupMetadata.TYPE)) + .map(o -> (WorkloadGroupMetadata) o) + .map(WorkloadGroupMetadata::workloadGroups) .orElse(Collections.emptyMap()); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java b/server/src/main/java/org/opensearch/cluster/metadata/WorkloadGroup.java similarity index 59% rename from server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java rename to server/src/main/java/org/opensearch/cluster/metadata/WorkloadGroup.java index 0eeafdc8f5eed..e09f14607f1e5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/WorkloadGroup.java @@ -18,8 +18,8 @@ import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.wlm.MutableQueryGroupFragment; -import org.opensearch.wlm.MutableQueryGroupFragment.ResiliencyMode; +import org.opensearch.wlm.MutableWorkloadGroupFragment; +import org.opensearch.wlm.MutableWorkloadGroupFragment.ResiliencyMode; import org.opensearch.wlm.ResourceType; import org.joda.time.Instant; @@ -30,7 +30,7 @@ import java.util.Optional; /** - * Class to define the QueryGroup schema + * Class to define the WorkloadGroup schema * { * "_id": "fafjafjkaf9ag8a9ga9g7ag0aagaga", * "resource_limits": { @@ -43,7 +43,7 @@ * } */ @PublicApi(since = "2.18.0") -public class QueryGroup extends AbstractDiffable implements ToXContentObject { +public class WorkloadGroup extends AbstractDiffable implements ToXContentObject { public static final String _ID_STRING = "_id"; public static final String NAME_STRING = "name"; @@ -53,29 +53,29 @@ public class QueryGroup extends AbstractDiffable implements ToXConte private final String _id; // It is an epoch in millis private final long updatedAtInMillis; - private final MutableQueryGroupFragment mutableQueryGroupFragment; + private final MutableWorkloadGroupFragment mutableWorkloadGroupFragment; - public QueryGroup(String name, MutableQueryGroupFragment mutableQueryGroupFragment) { - this(name, UUIDs.randomBase64UUID(), mutableQueryGroupFragment, Instant.now().getMillis()); + public WorkloadGroup(String name, MutableWorkloadGroupFragment mutableWorkloadGroupFragment) { + this(name, UUIDs.randomBase64UUID(), mutableWorkloadGroupFragment, Instant.now().getMillis()); } - public QueryGroup(String name, String _id, MutableQueryGroupFragment mutableQueryGroupFragment, long updatedAt) { - Objects.requireNonNull(name, "QueryGroup.name can't be null"); - 
Objects.requireNonNull(mutableQueryGroupFragment.getResourceLimits(), "QueryGroup.resourceLimits can't be null"); - Objects.requireNonNull(mutableQueryGroupFragment.getResiliencyMode(), "QueryGroup.resiliencyMode can't be null"); - Objects.requireNonNull(_id, "QueryGroup._id can't be null"); + public WorkloadGroup(String name, String _id, MutableWorkloadGroupFragment mutableWorkloadGroupFragment, long updatedAt) { + Objects.requireNonNull(name, "WorkloadGroup.name can't be null"); + Objects.requireNonNull(mutableWorkloadGroupFragment.getResourceLimits(), "WorkloadGroup.resourceLimits can't be null"); + Objects.requireNonNull(mutableWorkloadGroupFragment.getResiliencyMode(), "WorkloadGroup.resiliencyMode can't be null"); + Objects.requireNonNull(_id, "WorkloadGroup._id can't be null"); validateName(name); - if (mutableQueryGroupFragment.getResourceLimits().isEmpty()) { - throw new IllegalArgumentException("QueryGroup.resourceLimits should at least have 1 resource limit"); + if (mutableWorkloadGroupFragment.getResourceLimits().isEmpty()) { + throw new IllegalArgumentException("WorkloadGroup.resourceLimits should at least have 1 resource limit"); } if (!isValid(updatedAt)) { - throw new IllegalArgumentException("QueryGroup.updatedAtInMillis is not a valid epoch"); + throw new IllegalArgumentException("WorkloadGroup.updatedAtInMillis is not a valid epoch"); } this.name = name; this._id = _id; - this.mutableQueryGroupFragment = mutableQueryGroupFragment; + this.mutableWorkloadGroupFragment = mutableWorkloadGroupFragment; this.updatedAtInMillis = updatedAt; } @@ -89,22 +89,25 @@ public static boolean isValid(long updatedAt) { return minValidTimestamp <= updatedAt && updatedAt <= currentSeconds; } - public QueryGroup(StreamInput in) throws IOException { - this(in.readString(), in.readString(), new MutableQueryGroupFragment(in), in.readLong()); + public WorkloadGroup(StreamInput in) throws IOException { + this(in.readString(), in.readString(), new MutableWorkloadGroupFragment(in), in.readLong()); } - public static QueryGroup updateExistingQueryGroup(QueryGroup existingGroup, MutableQueryGroupFragment mutableQueryGroupFragment) { + public static WorkloadGroup updateExistingWorkloadGroup( + WorkloadGroup existingGroup, + MutableWorkloadGroupFragment mutableWorkloadGroupFragment + ) { final Map updatedResourceLimits = new HashMap<>(existingGroup.getResourceLimits()); - final Map mutableFragmentResourceLimits = mutableQueryGroupFragment.getResourceLimits(); + final Map mutableFragmentResourceLimits = mutableWorkloadGroupFragment.getResourceLimits(); if (mutableFragmentResourceLimits != null && !mutableFragmentResourceLimits.isEmpty()) { updatedResourceLimits.putAll(mutableFragmentResourceLimits); } - final ResiliencyMode mode = Optional.ofNullable(mutableQueryGroupFragment.getResiliencyMode()) + final ResiliencyMode mode = Optional.ofNullable(mutableWorkloadGroupFragment.getResiliencyMode()) .orElse(existingGroup.getResiliencyMode()); - return new QueryGroup( + return new WorkloadGroup( existingGroup.getName(), existingGroup.get_id(), - new MutableQueryGroupFragment(mode, updatedResourceLimits), + new MutableWorkloadGroupFragment(mode, updatedResourceLimits), Instant.now().getMillis() ); } @@ -113,13 +116,13 @@ public static QueryGroup updateExistingQueryGroup(QueryGroup existingGroup, Muta public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(_id); - mutableQueryGroupFragment.writeTo(out); + mutableWorkloadGroupFragment.writeTo(out); 
out.writeLong(updatedAtInMillis); } public static void validateName(String name) { if (name == null || name.isEmpty() || name.length() > MAX_CHARS_ALLOWED_IN_NAME) { - throw new IllegalArgumentException("QueryGroup.name shouldn't be null, empty or more than 50 chars long"); + throw new IllegalArgumentException("WorkloadGroup.name shouldn't be null, empty or more than 50 chars long"); } } @@ -128,52 +131,52 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa builder.startObject(); builder.field(_ID_STRING, _id); builder.field(NAME_STRING, name); - for (String fieldName : MutableQueryGroupFragment.acceptedFieldNames) { - mutableQueryGroupFragment.writeField(builder, fieldName); + for (String fieldName : MutableWorkloadGroupFragment.acceptedFieldNames) { + mutableWorkloadGroupFragment.writeField(builder, fieldName); } builder.field(UPDATED_AT_STRING, updatedAtInMillis); builder.endObject(); return builder; } - public static QueryGroup fromXContent(final XContentParser parser) throws IOException { + public static WorkloadGroup fromXContent(final XContentParser parser) throws IOException { return Builder.fromXContent(parser).build(); } - public static Diff readDiff(final StreamInput in) throws IOException { - return readDiffFrom(QueryGroup::new, in); + public static Diff readDiff(final StreamInput in) throws IOException { + return readDiffFrom(WorkloadGroup::new, in); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - QueryGroup that = (QueryGroup) o; + WorkloadGroup that = (WorkloadGroup) o; return Objects.equals(name, that.name) - && Objects.equals(mutableQueryGroupFragment, that.mutableQueryGroupFragment) + && Objects.equals(mutableWorkloadGroupFragment, that.mutableWorkloadGroupFragment) && Objects.equals(_id, that._id) && updatedAtInMillis == that.updatedAtInMillis; } @Override public int hashCode() { - return Objects.hash(name, mutableQueryGroupFragment, updatedAtInMillis, _id); + return Objects.hash(name, mutableWorkloadGroupFragment, updatedAtInMillis, _id); } public String getName() { return name; } - public MutableQueryGroupFragment getMutableQueryGroupFragment() { - return mutableQueryGroupFragment; + public MutableWorkloadGroupFragment getMutableWorkloadGroupFragment() { + return mutableWorkloadGroupFragment; } public ResiliencyMode getResiliencyMode() { - return getMutableQueryGroupFragment().getResiliencyMode(); + return getMutableWorkloadGroupFragment().getResiliencyMode(); } public Map getResourceLimits() { - return getMutableQueryGroupFragment().getResourceLimits(); + return getMutableWorkloadGroupFragment().getResourceLimits(); } public String get_id() { @@ -185,7 +188,7 @@ public long getUpdatedAtInMillis() { } /** - * builder method for the {@link QueryGroup} + * builder method for the {@link WorkloadGroup} * @return Builder object */ public static Builder builder() { @@ -193,13 +196,13 @@ public static Builder builder() { } /** - * Builder class for {@link QueryGroup} + * Builder class for {@link WorkloadGroup} */ @ExperimentalApi public static class Builder { private String name; private String _id; - private MutableQueryGroupFragment mutableQueryGroupFragment; + private MutableWorkloadGroupFragment mutableWorkloadGroupFragment; private long updatedAt; private Builder() {} @@ -218,7 +221,7 @@ public static Builder fromXContent(XContentParser parser) throws IOException { } String fieldName = ""; - MutableQueryGroupFragment mutableQueryGroupFragment1 = new 
MutableQueryGroupFragment(); + MutableWorkloadGroupFragment mutableWorkloadGroupFragment1 = new MutableWorkloadGroupFragment(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); @@ -227,21 +230,21 @@ public static Builder fromXContent(XContentParser parser) throws IOException { builder._id(parser.text()); } else if (fieldName.equals(NAME_STRING)) { builder.name(parser.text()); - } else if (MutableQueryGroupFragment.shouldParse(fieldName)) { - mutableQueryGroupFragment1.parseField(parser, fieldName); + } else if (MutableWorkloadGroupFragment.shouldParse(fieldName)) { + mutableWorkloadGroupFragment1.parseField(parser, fieldName); } else if (fieldName.equals(UPDATED_AT_STRING)) { builder.updatedAt(parser.longValue()); } else { - throw new IllegalArgumentException(fieldName + " is not a valid field in QueryGroup"); + throw new IllegalArgumentException(fieldName + " is not a valid field in WorkloadGroup"); } } else if (token == XContentParser.Token.START_OBJECT) { - if (!MutableQueryGroupFragment.shouldParse(fieldName)) { - throw new IllegalArgumentException(fieldName + " is not a valid object in QueryGroup"); + if (!MutableWorkloadGroupFragment.shouldParse(fieldName)) { + throw new IllegalArgumentException(fieldName + " is not a valid object in WorkloadGroup"); } - mutableQueryGroupFragment1.parseField(parser, fieldName); + mutableWorkloadGroupFragment1.parseField(parser, fieldName); } } - return builder.mutableQueryGroupFragment(mutableQueryGroupFragment1); + return builder.mutableWorkloadGroupFragment(mutableWorkloadGroupFragment1); } public Builder name(String name) { @@ -254,8 +257,8 @@ public Builder _id(String _id) { return this; } - public Builder mutableQueryGroupFragment(MutableQueryGroupFragment mutableQueryGroupFragment) { - this.mutableQueryGroupFragment = mutableQueryGroupFragment; + public Builder mutableWorkloadGroupFragment(MutableWorkloadGroupFragment mutableWorkloadGroupFragment) { + this.mutableWorkloadGroupFragment = mutableWorkloadGroupFragment; return this; } @@ -264,12 +267,12 @@ public Builder updatedAt(long updatedAt) { return this; } - public QueryGroup build() { - return new QueryGroup(name, _id, mutableQueryGroupFragment, updatedAt); + public WorkloadGroup build() { + return new WorkloadGroup(name, _id, mutableWorkloadGroupFragment, updatedAt); } - public MutableQueryGroupFragment getMutableQueryGroupFragment() { - return mutableQueryGroupFragment; + public MutableWorkloadGroupFragment getMutableWorkloadGroupFragment() { + return mutableWorkloadGroupFragment; } } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/WorkloadGroupMetadata.java similarity index 58% rename from server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java rename to server/src/main/java/org/opensearch/cluster/metadata/WorkloadGroupMetadata.java index 79732bc505ee2..f6f039e03f285 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/WorkloadGroupMetadata.java @@ -29,33 +29,35 @@ import static org.opensearch.cluster.metadata.Metadata.ALL_CONTEXTS; /** - * This class holds the QueryGroupMetadata + * This class holds the WorkloadGroupMetadata * sample schema * { * "queryGroups": { * "_id": { - * {@link QueryGroup} + * {@link WorkloadGroup} * }, * ... 
* } * } */ -public class QueryGroupMetadata implements Metadata.Custom { +public class WorkloadGroupMetadata implements Metadata.Custom { + // We are not changing this name to ensure the cluster state restore works when an OS version < 3.0 writes it to + // either a remote store or to local disk and an OS version >= 3.0 reads it public static final String TYPE = "queryGroups"; - private static final ParseField QUERY_GROUP_FIELD = new ParseField("queryGroups"); + private static final ParseField WORKLOAD_GROUP_FIELD = new ParseField("queryGroups"); - private final Map queryGroups; + private final Map workloadGroups; - public QueryGroupMetadata(Map queryGroups) { - this.queryGroups = queryGroups; + public WorkloadGroupMetadata(Map workloadGroups) { + this.workloadGroups = workloadGroups; } - public QueryGroupMetadata(StreamInput in) throws IOException { - this.queryGroups = in.readMap(StreamInput::readString, QueryGroup::new); + public WorkloadGroupMetadata(StreamInput in) throws IOException { + this.workloadGroups = in.readMap(StreamInput::readString, WorkloadGroup::new); } - public Map queryGroups() { - return this.queryGroups; + public Map workloadGroups() { + return this.workloadGroups; } /** @@ -76,19 +78,19 @@ public Version getMinimalSupportedVersion() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(queryGroups, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); + out.writeMap(workloadGroups, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - for (Map.Entry entry : queryGroups.entrySet()) { + for (Map.Entry entry : workloadGroups.entrySet()) { builder.field(entry.getKey(), entry.getValue()); } return builder; } - public static QueryGroupMetadata fromXContent(XContentParser parser) throws IOException { - Map queryGroupMap = new HashMap<>(); + public static WorkloadGroupMetadata fromXContent(XContentParser parser) throws IOException { + Map workloadGroupMap = new HashMap<>(); if (parser.currentToken() == null) { parser.nextToken(); @@ -96,7 +98,7 @@ if (parser.currentToken() != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException( - "QueryGroupMetadata.fromXContent was expecting a { token but found : " + parser.currentToken() + "WorkloadGroupMetadata.fromXContent was expecting a { token but found : " + parser.currentToken() ); } XContentParser.Token token = parser.currentToken(); @@ -105,21 +107,21 @@ public static QueryGroupMetadata fromXContent(XContentParser parser) throws IOEx if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else { - QueryGroup queryGroup = QueryGroup.fromXContent(parser); - queryGroupMap.put(fieldName, queryGroup); + WorkloadGroup workloadGroup = WorkloadGroup.fromXContent(parser); + workloadGroupMap.put(fieldName, workloadGroup); } } - return new QueryGroupMetadata(queryGroupMap); + return new WorkloadGroupMetadata(workloadGroupMap); } @Override public Diff diff(final Metadata.Custom previousState) { - return new QueryGroupMetadataDiff((QueryGroupMetadata) previousState, this); + return new WorkloadGroupMetadataDiff((WorkloadGroupMetadata) previousState, this); } public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - return new QueryGroupMetadataDiff(in); + return new WorkloadGroupMetadataDiff(in); } @Override @@ -131,13 +133,13 @@ public 
EnumSet context() { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - QueryGroupMetadata that = (QueryGroupMetadata) o; - return Objects.equals(queryGroups, that.queryGroups); + WorkloadGroupMetadata that = (WorkloadGroupMetadata) o; + return Objects.equals(workloadGroups, that.workloadGroups); } @Override public int hashCode() { - return Objects.hash(queryGroups); + return Objects.hash(workloadGroups); } @Override @@ -146,21 +148,21 @@ public String toString() { } /** - * QueryGroupMetadataDiff + * WorkloadGroupMetadataDiff */ - static class QueryGroupMetadataDiff implements NamedDiff { - final Diff> dataStreamDiff; + static class WorkloadGroupMetadataDiff implements NamedDiff { + final Diff> dataStreamDiff; - QueryGroupMetadataDiff(final QueryGroupMetadata before, final QueryGroupMetadata after) { - dataStreamDiff = DiffableUtils.diff(before.queryGroups, after.queryGroups, DiffableUtils.getStringKeySerializer()); + WorkloadGroupMetadataDiff(final WorkloadGroupMetadata before, final WorkloadGroupMetadata after) { + dataStreamDiff = DiffableUtils.diff(before.workloadGroups, after.workloadGroups, DiffableUtils.getStringKeySerializer()); } - QueryGroupMetadataDiff(final StreamInput in) throws IOException { + WorkloadGroupMetadataDiff(final StreamInput in) throws IOException { this.dataStreamDiff = DiffableUtils.readJdkMapDiff( in, DiffableUtils.getStringKeySerializer(), - QueryGroup::new, - QueryGroup::readDiff + WorkloadGroup::new, + WorkloadGroup::readDiff ); } @@ -179,7 +181,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Metadata.Custom apply(Metadata.Custom part) { - return new QueryGroupMetadata(new HashMap<>(dataStreamDiff.apply(((QueryGroupMetadata) part).queryGroups))); + return new WorkloadGroupMetadata(new HashMap<>(dataStreamDiff.apply(((WorkloadGroupMetadata) part).workloadGroups))); } } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 8037f90653d89..e7bbc3b2d5723 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -272,14 +272,14 @@ import org.opensearch.transport.client.node.NodeClient; import org.opensearch.usage.UsageService; import org.opensearch.watcher.ResourceWatcherService; -import org.opensearch.wlm.QueryGroupService; -import org.opensearch.wlm.QueryGroupsStateAccessor; +import org.opensearch.wlm.WorkloadGroupService; +import org.opensearch.wlm.WorkloadGroupsStateAccessor; import org.opensearch.wlm.WorkloadManagementSettings; import org.opensearch.wlm.WorkloadManagementTransportInterceptor; import org.opensearch.wlm.cancellation.MaximumResourceTaskSelectionStrategy; -import org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService; -import org.opensearch.wlm.listeners.QueryGroupRequestOperationListener; -import org.opensearch.wlm.tracker.QueryGroupResourceUsageTrackerService; +import org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService; +import org.opensearch.wlm.listeners.WorkloadGroupRequestOperationListener; +import org.opensearch.wlm.tracker.WorkloadGroupResourceUsageTrackerService; import javax.net.ssl.SNIHostName; @@ -1055,32 +1055,31 @@ protected Node( List identityAwarePlugins = pluginsService.filterPlugins(IdentityAwarePlugin.class); identityService.initializeIdentityAwarePlugins(identityAwarePlugins); - final QueryGroupResourceUsageTrackerService queryGroupResourceUsageTrackerService = 
new QueryGroupResourceUsageTrackerService( - taskResourceTrackingService - ); + final WorkloadGroupResourceUsageTrackerService workloadGroupResourceUsageTrackerService = + new WorkloadGroupResourceUsageTrackerService(taskResourceTrackingService); final WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings( settings, settingsModule.getClusterSettings() ); - final QueryGroupsStateAccessor queryGroupsStateAccessor = new QueryGroupsStateAccessor(); + final WorkloadGroupsStateAccessor workloadGroupsStateAccessor = new WorkloadGroupsStateAccessor(); - final QueryGroupService queryGroupService = new QueryGroupService( - new QueryGroupTaskCancellationService( + final WorkloadGroupService workloadGroupService = new WorkloadGroupService( + new WorkloadGroupTaskCancellationService( workloadManagementSettings, new MaximumResourceTaskSelectionStrategy(), - queryGroupResourceUsageTrackerService, - queryGroupsStateAccessor + workloadGroupResourceUsageTrackerService, + workloadGroupsStateAccessor ), clusterService, threadPool, workloadManagementSettings, - queryGroupsStateAccessor + workloadGroupsStateAccessor ); - taskResourceTrackingService.addTaskCompletionListener(queryGroupService); + taskResourceTrackingService.addTaskCompletionListener(workloadGroupService); - final QueryGroupRequestOperationListener queryGroupRequestOperationListener = new QueryGroupRequestOperationListener( - queryGroupService, + final WorkloadGroupRequestOperationListener workloadGroupRequestOperationListener = new WorkloadGroupRequestOperationListener( + workloadGroupService, threadPool ); @@ -1092,7 +1091,7 @@ protected Node( searchRequestStats, searchRequestSlowLog, searchTaskRequestOperationsListener, - queryGroupRequestOperationListener + workloadGroupRequestOperationListener ), pluginComponents.stream() .filter(p -> p instanceof SearchRequestOperationsListener) @@ -1144,7 +1143,7 @@ protected Node( WorkloadManagementTransportInterceptor workloadManagementTransportInterceptor = new WorkloadManagementTransportInterceptor( threadPool, - queryGroupService + workloadGroupService ); final Collection secureSettingsFactories = pluginsService.filterPlugins(Plugin.class) @@ -1243,7 +1242,7 @@ protected Node( taskResourceTrackingService, threadPool, transportService.getTaskManager(), - queryGroupService + workloadGroupService ); final SegmentReplicationStatsTracker segmentReplicationStatsTracker = new SegmentReplicationStatsTracker(indicesService); @@ -1455,7 +1454,7 @@ protected Node( b.bind(IndexingPressureService.class).toInstance(indexingPressureService); b.bind(TaskResourceTrackingService.class).toInstance(taskResourceTrackingService); b.bind(SearchBackpressureService.class).toInstance(searchBackpressureService); - b.bind(QueryGroupService.class).toInstance(queryGroupService); + b.bind(WorkloadGroupService.class).toInstance(workloadGroupService); b.bind(AdmissionControlService.class).toInstance(admissionControlService); b.bind(UsageService.class).toInstance(usageService); b.bind(AggregationUsageService.class).toInstance(searchModule.getValuesSourceRegistry().getUsageService()); @@ -1649,7 +1648,7 @@ public Node start() throws NodeValidationException { nodeService.getMonitorService().start(); nodeService.getSearchBackpressureService().start(); nodeService.getTaskCancellationMonitoringService().start(); - injector.getInstance(QueryGroupService.class).start(); + injector.getInstance(WorkloadGroupService.class).start(); final ClusterService clusterService = injector.getInstance(ClusterService.class); 
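For readers skimming the Node.java hunk above, the renamed workload-management wiring reduces to the dependency chain sketched below. This is an illustrative condensation, not new code in the patch: the constructor calls and argument order are copied from the hunk, while the shortened local variable names (usageTracker, wlmSettings, stateAccessor, wlmRequestListener, wlmInterceptor) and the assumption that settings, settingsModule, threadPool, clusterService, and taskResourceTrackingService are already in scope inside the Node constructor are the editor's.

    // Per-group resource usage is derived from the existing per-task tracking service.
    final WorkloadGroupResourceUsageTrackerService usageTracker =
        new WorkloadGroupResourceUsageTrackerService(taskResourceTrackingService);

    // Dynamic WLM settings (mode, thresholds) backed by cluster settings.
    final WorkloadManagementSettings wlmSettings =
        new WorkloadManagementSettings(settings, settingsModule.getClusterSettings());

    // Shared per-group state, visible to both the service and the cancellation logic.
    final WorkloadGroupsStateAccessor stateAccessor = new WorkloadGroupsStateAccessor();

    // The service owns cancellation (victim selection plus usage tracking) and is
    // driven by cluster state changes and the thread pool.
    final WorkloadGroupService workloadGroupService = new WorkloadGroupService(
        new WorkloadGroupTaskCancellationService(
            wlmSettings,
            new MaximumResourceTaskSelectionStrategy(),
            usageTracker,
            stateAccessor
        ),
        clusterService,
        threadPool,
        wlmSettings,
        stateAccessor
    );

    // Hook the service into task completion, the search request lifecycle, and
    // transport-level task creation; it is later bound in the injector and started
    // with the node.
    taskResourceTrackingService.addTaskCompletionListener(workloadGroupService);
    final WorkloadGroupRequestOperationListener wlmRequestListener =
        new WorkloadGroupRequestOperationListener(workloadGroupService, threadPool);
    final WorkloadManagementTransportInterceptor wlmInterceptor =
        new WorkloadManagementTransportInterceptor(threadPool, workloadGroupService);

The matching stop() call appears in the next hunk, so the renamed service keeps the same node lifecycle the old QueryGroupService had.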
@@ -1823,7 +1822,7 @@ private Node stop() { injector.getInstance(FsHealthService.class).stop(); injector.getInstance(NodeResourceUsageTracker.class).stop(); injector.getInstance(ResourceUsageCollectorService.class).stop(); - injector.getInstance(QueryGroupService.class).stop(); + injector.getInstance(WorkloadGroupService.class).stop(); nodeService.getMonitorService().stop(); nodeService.getSearchBackpressureService().stop(); injector.getInstance(GatewayService.class).stop(); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestWlmStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestWlmStatsAction.java index b3f60d0ccc767..b0e52f1f2b29c 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestWlmStatsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestWlmStatsAction.java @@ -36,8 +36,8 @@ public List routes() { asList( new Route(GET, "_wlm/stats"), new Route(GET, "_wlm/{nodeId}/stats"), - new Route(GET, "_wlm/stats/{queryGroupId}"), - new Route(GET, "_wlm/{nodeId}/stats/{queryGroupId}") + new Route(GET, "_wlm/stats/{workloadGroupId}"), + new Route(GET, "_wlm/{nodeId}/stats/{workloadGroupId}") ) ); } @@ -50,9 +50,9 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); - Set queryGroupIds = Strings.tokenizeByCommaToSet(request.param("queryGroupId", "_all")); + Set workloadGroupIds = Strings.tokenizeByCommaToSet(request.param("workloadGroupId", "_all")); Boolean breach = request.hasParam("breach") ? Boolean.parseBoolean(request.param("boolean")) : null; - WlmStatsRequest wlmStatsRequest = new WlmStatsRequest(nodesIds, queryGroupIds, breach); + WlmStatsRequest wlmStatsRequest = new WlmStatsRequest(nodesIds, workloadGroupIds, breach); return channel -> client.admin().cluster().wlmStats(wlmStatsRequest, new RestActions.NodesResponseRestListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java index d6eac45ca0288..6c12857477768 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java +++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java @@ -42,8 +42,8 @@ import org.opensearch.tasks.TaskResourceTrackingService.TaskCompletionListener; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.wlm.QueryGroupService; import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadGroupService; import java.io.IOException; import java.util.ArrayList; @@ -87,14 +87,14 @@ public class SearchBackpressureService extends AbstractLifecycleComponent implem private final Map, SearchBackpressureState> searchBackpressureStates; private final TaskManager taskManager; - private final QueryGroupService queryGroupService; + private final WorkloadGroupService workloadGroupService; public SearchBackpressureService( SearchBackpressureSettings settings, TaskResourceTrackingService taskResourceTrackingService, ThreadPool threadPool, TaskManager taskManager, - QueryGroupService queryGroupService + WorkloadGroupService workloadGroupService ) { this(settings, taskResourceTrackingService, threadPool, System::nanoTime, new NodeDuressTrackers(new 
EnumMap<>(ResourceType.class) { { @@ -135,7 +135,7 @@ public SearchBackpressureService( SearchShardTaskSettings.SETTING_HEAP_MOVING_AVERAGE_WINDOW_SIZE ), taskManager, - queryGroupService + workloadGroupService ); } @@ -148,7 +148,7 @@ public SearchBackpressureService( TaskResourceUsageTrackers searchTaskTrackers, TaskResourceUsageTrackers searchShardTaskTrackers, TaskManager taskManager, - QueryGroupService queryGroupService + WorkloadGroupService workloadGroupService ) { this.settings = settings; this.taskResourceTrackingService = taskResourceTrackingService; @@ -156,7 +156,7 @@ public SearchBackpressureService( this.threadPool = threadPool; this.nodeDuressTrackers = nodeDuressTrackers; this.taskManager = taskManager; - this.queryGroupService = queryGroupService; + this.workloadGroupService = workloadGroupService; this.searchBackpressureStates = Map.of( SearchTask.class, @@ -352,7 +352,7 @@ List getTa .stream() .filter(type::isInstance) .map(type::cast) - .filter(queryGroupService::shouldSBPHandle) + .filter(workloadGroupService::shouldSBPHandle) .collect(Collectors.toUnmodifiableList()); } diff --git a/server/src/main/java/org/opensearch/transport/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/transport/client/ClusterAdminClient.java index 00b4fc1ed3867..4fc9b6ce83587 100644 --- a/server/src/main/java/org/opensearch/transport/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/transport/client/ClusterAdminClient.java @@ -323,7 +323,7 @@ public interface ClusterAdminClient extends OpenSearchClient { NodesStatsRequestBuilder prepareNodesStats(String... nodesIds); /** - * QueryGroup stats of the cluster. + * WorkloadGroup stats of the cluster. * @param request The wlmStatsRequest * @param listener A listener to be notified with a result */ diff --git a/server/src/main/java/org/opensearch/wlm/MutableQueryGroupFragment.java b/server/src/main/java/org/opensearch/wlm/MutableWorkloadGroupFragment.java similarity index 93% rename from server/src/main/java/org/opensearch/wlm/MutableQueryGroupFragment.java rename to server/src/main/java/org/opensearch/wlm/MutableWorkloadGroupFragment.java index 8ea240132fea2..329659b891f9f 100644 --- a/server/src/main/java/org/opensearch/wlm/MutableQueryGroupFragment.java +++ b/server/src/main/java/org/opensearch/wlm/MutableWorkloadGroupFragment.java @@ -24,10 +24,10 @@ import java.util.function.Function; /** - * Class to hold the fields that can be updated in a QueryGroup. + * Class to hold the fields that can be updated in a WorkloadGroup. 
*/ @ExperimentalApi -public class MutableQueryGroupFragment extends AbstractDiffable { +public class MutableWorkloadGroupFragment extends AbstractDiffable { public static final String RESILIENCY_MODE_STRING = "resiliency_mode"; public static final String RESOURCE_LIMITS_STRING = "resource_limits"; @@ -36,15 +36,15 @@ public class MutableQueryGroupFragment extends AbstractDiffable acceptedFieldNames = List.of(RESILIENCY_MODE_STRING, RESOURCE_LIMITS_STRING); - public MutableQueryGroupFragment() {} + public MutableWorkloadGroupFragment() {} - public MutableQueryGroupFragment(ResiliencyMode resiliencyMode, Map resourceLimits) { + public MutableWorkloadGroupFragment(ResiliencyMode resiliencyMode, Map resourceLimits) { validateResourceLimits(resourceLimits); this.resiliencyMode = resiliencyMode; this.resourceLimits = resourceLimits; } - public MutableQueryGroupFragment(StreamInput in) throws IOException { + public MutableWorkloadGroupFragment(StreamInput in) throws IOException { if (in.readBoolean()) { resourceLimits = in.readMap((i) -> ResourceType.fromName(i.readString()), StreamInput::readDouble); } else { @@ -166,7 +166,7 @@ public static void validateResourceLimits(Map resourceLimi public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - MutableQueryGroupFragment that = (MutableQueryGroupFragment) o; + MutableWorkloadGroupFragment that = (MutableWorkloadGroupFragment) o; return Objects.equals(resiliencyMode, that.resiliencyMode) && Objects.equals(resourceLimits, that.resourceLimits); } @@ -184,7 +184,7 @@ public Map getResourceLimits() { } /** - * This enum models the different QueryGroup resiliency modes + * This enum models the different WorkloadGroup resiliency modes * SOFT - means that this query group can consume more than query group resource limits if node is not in duress * ENFORCED - means that it will never breach the assigned limits and will cancel as soon as the limits are breached * MONITOR - it will not cause any cancellation but just log the eligible task cancellations @@ -210,7 +210,7 @@ public static ResiliencyMode fromName(String s) { if (mode.getName().equalsIgnoreCase(s)) return mode; } - throw new IllegalArgumentException("Invalid value for QueryGroupMode: " + s); + throw new IllegalArgumentException("Invalid value for WorkloadGroupMode: " + s); } } diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupsStateAccessor.java b/server/src/main/java/org/opensearch/wlm/QueryGroupsStateAccessor.java deleted file mode 100644 index 7f93e41f12092..0000000000000 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupsStateAccessor.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.wlm; - -import org.opensearch.wlm.stats.QueryGroupState; - -import java.util.HashMap; -import java.util.Map; - -/** - * This class is used to decouple {@link QueryGroupService} and {@link org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService} to share the - * {@link QueryGroupState}s - */ -public class QueryGroupsStateAccessor { - // This map does not need to be concurrent since we will process the cluster state change serially and update - // this map with new additions and deletions of entries. 
QueryGroupState is thread safe - private final Map queryGroupStateMap; - - public QueryGroupsStateAccessor() { - this(new HashMap<>()); - } - - public QueryGroupsStateAccessor(Map queryGroupStateMap) { - this.queryGroupStateMap = queryGroupStateMap; - } - - /** - * returns the query groups state - */ - public Map getQueryGroupStateMap() { - return queryGroupStateMap; - } - - /** - * return QueryGroupState for the given queryGroupId - * @param queryGroupId - * @return QueryGroupState for the given queryGroupId, if id is invalid return default query group state - */ - public QueryGroupState getQueryGroupState(String queryGroupId) { - return queryGroupStateMap.getOrDefault(queryGroupId, queryGroupStateMap.get(QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get())); - } - - /** - * adds new QueryGroupState against given queryGroupId - * @param queryGroupId - */ - public void addNewQueryGroup(String queryGroupId) { - this.queryGroupStateMap.putIfAbsent(queryGroupId, new QueryGroupState()); - } - - /** - * removes QueryGroupState against given queryGroupId - * @param queryGroupId - */ - public void removeQueryGroup(String queryGroupId) { - this.queryGroupStateMap.remove(queryGroupId); - } -} diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java b/server/src/main/java/org/opensearch/wlm/WorkloadGroupLevelResourceUsageView.java similarity index 59% rename from server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java rename to server/src/main/java/org/opensearch/wlm/WorkloadGroupLevelResourceUsageView.java index de213eaab64a8..01abac2ffa089 100644 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java +++ b/server/src/main/java/org/opensearch/wlm/WorkloadGroupLevelResourceUsageView.java @@ -12,17 +12,17 @@ import java.util.Map; /** - * Represents the point in time view of resource usage of a QueryGroup and - * has a 1:1 relation with a QueryGroup. + * Represents the point in time view of resource usage of a WorkloadGroup and + * has a 1:1 relation with a WorkloadGroup. * This class holds the resource usage data and the list of active tasks. 
*/ -public class QueryGroupLevelResourceUsageView { - // resourceUsage holds the resource usage data for a QueryGroup at a point in time +public class WorkloadGroupLevelResourceUsageView { + // resourceUsage holds the resource usage data for a WorkloadGroup at a point in time private final Map resourceUsage; - // activeTasks holds the list of active tasks for a QueryGroup at a point in time - private final List activeTasks; + // activeTasks holds the list of active tasks for a WorkloadGroup at a point in time + private final List activeTasks; - public QueryGroupLevelResourceUsageView(Map resourceUsage, List activeTasks) { + public WorkloadGroupLevelResourceUsageView(Map resourceUsage, List activeTasks) { this.resourceUsage = resourceUsage; this.activeTasks = activeTasks; } @@ -41,7 +41,7 @@ public Map getResourceUsageData() { * * @return The list of active tasks */ - public List getActiveTasks() { + public List getActiveTasks() { return activeTasks; } } diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupService.java b/server/src/main/java/org/opensearch/wlm/WorkloadGroupService.java similarity index 51% rename from server/src/main/java/org/opensearch/wlm/QueryGroupService.java rename to server/src/main/java/org/opensearch/wlm/WorkloadGroupService.java index 4451b3e7b62f4..970844a2b59b0 100644 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupService.java +++ b/server/src/main/java/org/opensearch/wlm/WorkloadGroupService.java @@ -14,7 +14,7 @@ import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.metadata.WorkloadGroup; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -26,10 +26,10 @@ import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService; -import org.opensearch.wlm.stats.QueryGroupState; -import org.opensearch.wlm.stats.QueryGroupStats; -import org.opensearch.wlm.stats.QueryGroupStats.QueryGroupStatsHolder; +import org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService; +import org.opensearch.wlm.stats.WorkloadGroupState; +import org.opensearch.wlm.stats.WorkloadGroupStats; +import org.opensearch.wlm.stats.WorkloadGroupStats.WorkloadGroupStatsHolder; import java.io.IOException; import java.util.HashMap; @@ -38,34 +38,34 @@ import java.util.Optional; import java.util.Set; -import static org.opensearch.wlm.tracker.QueryGroupResourceUsageTrackerService.TRACKED_RESOURCES; +import static org.opensearch.wlm.tracker.WorkloadGroupResourceUsageTrackerService.TRACKED_RESOURCES; /** - * As of now this is a stub and main implementation PR will be raised soon.Coming PR will collate these changes with core QueryGroupService changes + * As of now this is a stub and main implementation PR will be raised soon.Coming PR will collate these changes with core WorkloadGroupService changes * @opensearch.experimental */ -public class QueryGroupService extends AbstractLifecycleComponent +public class WorkloadGroupService extends AbstractLifecycleComponent implements ClusterStateListener, TaskResourceTrackingService.TaskCompletionListener { - private static final Logger logger = 
LogManager.getLogger(QueryGroupService.class); - private final QueryGroupTaskCancellationService taskCancellationService; + private static final Logger logger = LogManager.getLogger(WorkloadGroupService.class); + private final WorkloadGroupTaskCancellationService taskCancellationService; private volatile Scheduler.Cancellable scheduledFuture; private final ThreadPool threadPool; private final ClusterService clusterService; private final WorkloadManagementSettings workloadManagementSettings; - private Set activeQueryGroups; - private final Set deletedQueryGroups; + private Set activeWorkloadGroups; + private final Set deletedWorkloadGroups; private final NodeDuressTrackers nodeDuressTrackers; - private final QueryGroupsStateAccessor queryGroupsStateAccessor; + private final WorkloadGroupsStateAccessor workloadGroupsStateAccessor; - public QueryGroupService( - QueryGroupTaskCancellationService taskCancellationService, + public WorkloadGroupService( + WorkloadGroupTaskCancellationService taskCancellationService, ClusterService clusterService, ThreadPool threadPool, WorkloadManagementSettings workloadManagementSettings, - QueryGroupsStateAccessor queryGroupsStateAccessor + WorkloadGroupsStateAccessor workloadGroupsStateAccessor ) { this( @@ -90,32 +90,32 @@ public QueryGroupService( ) ) ), - queryGroupsStateAccessor, + workloadGroupsStateAccessor, new HashSet<>(), new HashSet<>() ); } - public QueryGroupService( - QueryGroupTaskCancellationService taskCancellationService, + public WorkloadGroupService( + WorkloadGroupTaskCancellationService taskCancellationService, ClusterService clusterService, ThreadPool threadPool, WorkloadManagementSettings workloadManagementSettings, NodeDuressTrackers nodeDuressTrackers, - QueryGroupsStateAccessor queryGroupsStateAccessor, - Set activeQueryGroups, - Set deletedQueryGroups + WorkloadGroupsStateAccessor workloadGroupsStateAccessor, + Set activeWorkloadGroups, + Set deletedWorkloadGroups ) { this.taskCancellationService = taskCancellationService; this.clusterService = clusterService; this.threadPool = threadPool; this.workloadManagementSettings = workloadManagementSettings; this.nodeDuressTrackers = nodeDuressTrackers; - this.activeQueryGroups = activeQueryGroups; - this.deletedQueryGroups = deletedQueryGroups; - this.queryGroupsStateAccessor = queryGroupsStateAccessor; - activeQueryGroups.forEach(queryGroup -> this.queryGroupsStateAccessor.addNewQueryGroup(queryGroup.get_id())); - this.queryGroupsStateAccessor.addNewQueryGroup(QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get()); + this.activeWorkloadGroups = activeWorkloadGroups; + this.deletedWorkloadGroups = deletedWorkloadGroups; + this.workloadGroupsStateAccessor = workloadGroupsStateAccessor; + activeWorkloadGroups.forEach(workloadGroup -> this.workloadGroupsStateAccessor.addNewWorkloadGroup(workloadGroup.get_id())); + this.workloadGroupsStateAccessor.addNewWorkloadGroup(WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get()); this.clusterService.addListener(this); } @@ -126,8 +126,8 @@ void doRun() { if (workloadManagementSettings.getWlmMode() == WlmMode.DISABLED) { return; } - taskCancellationService.cancelTasks(nodeDuressTrackers::isNodeInDuress, activeQueryGroups, deletedQueryGroups); - taskCancellationService.pruneDeletedQueryGroups(deletedQueryGroups); + taskCancellationService.cancelTasks(nodeDuressTrackers::isNodeInDuress, activeWorkloadGroups, deletedWorkloadGroups); + taskCancellationService.pruneDeletedWorkloadGroups(deletedWorkloadGroups); } /** @@ -141,7 +141,7 @@ protected void 
doStart() { } catch (Exception e) { logger.debug("Exception occurred in Query Sandbox service", e); } - }, this.workloadManagementSettings.getQueryGroupServiceRunInterval(), ThreadPool.Names.GENERIC); + }, this.workloadManagementSettings.getWorkloadGroupServiceRunInterval(), ThreadPool.Names.GENERIC); } @Override @@ -161,85 +161,85 @@ public void clusterChanged(ClusterChangedEvent event) { Metadata currentMetadata = event.state().metadata(); // Extract the query groups from both the current and previous cluster states - Map previousQueryGroups = previousMetadata.queryGroups(); - Map currentQueryGroups = currentMetadata.queryGroups(); + Map previousWorkloadGroups = previousMetadata.workloadGroups(); + Map currentWorkloadGroups = currentMetadata.workloadGroups(); // Detect new query groups added in the current cluster state - for (String queryGroupName : currentQueryGroups.keySet()) { - if (!previousQueryGroups.containsKey(queryGroupName)) { + for (String workloadGroupName : currentWorkloadGroups.keySet()) { + if (!previousWorkloadGroups.containsKey(workloadGroupName)) { // New query group detected - QueryGroup newQueryGroup = currentQueryGroups.get(queryGroupName); + WorkloadGroup newWorkloadGroup = currentWorkloadGroups.get(workloadGroupName); // Perform any necessary actions with the new query group - queryGroupsStateAccessor.addNewQueryGroup(newQueryGroup.get_id()); + workloadGroupsStateAccessor.addNewWorkloadGroup(newWorkloadGroup.get_id()); } } // Detect query groups deleted in the current cluster state - for (String queryGroupName : previousQueryGroups.keySet()) { - if (!currentQueryGroups.containsKey(queryGroupName)) { + for (String workloadGroupName : previousWorkloadGroups.keySet()) { + if (!currentWorkloadGroups.containsKey(workloadGroupName)) { // Query group deleted - QueryGroup deletedQueryGroup = previousQueryGroups.get(queryGroupName); + WorkloadGroup deletedWorkloadGroup = previousWorkloadGroups.get(workloadGroupName); // Perform any necessary actions with the deleted query group - this.deletedQueryGroups.add(deletedQueryGroup); - queryGroupsStateAccessor.removeQueryGroup(deletedQueryGroup.get_id()); + this.deletedWorkloadGroups.add(deletedWorkloadGroup); + workloadGroupsStateAccessor.removeWorkloadGroup(deletedWorkloadGroup.get_id()); } } - this.activeQueryGroups = new HashSet<>(currentMetadata.queryGroups().values()); + this.activeWorkloadGroups = new HashSet<>(currentMetadata.workloadGroups().values()); } /** * updates the failure stats for the query group * - * @param queryGroupId query group identifier + * @param workloadGroupId query group identifier */ - public void incrementFailuresFor(final String queryGroupId) { - QueryGroupState queryGroupState = queryGroupsStateAccessor.getQueryGroupState(queryGroupId); + public void incrementFailuresFor(final String workloadGroupId) { + WorkloadGroupState workloadGroupState = workloadGroupsStateAccessor.getWorkloadGroupState(workloadGroupId); // This can happen if the request failed for a deleted query group - // or new queryGroup is being created and has not been acknowledged yet - if (queryGroupState == null) { + // or new workloadGroup is being created and has not been acknowledged yet + if (workloadGroupState == null) { return; } - queryGroupState.failures.inc(); + workloadGroupState.failures.inc(); } /** * @return node level query group stats */ - public QueryGroupStats nodeStats(Set queryGroupIds, Boolean requestedBreached) { - final Map statsHolderMap = new HashMap<>(); - Map existingStateMap = 
queryGroupsStateAccessor.getQueryGroupStateMap(); - if (!queryGroupIds.contains("_all")) { - for (String id : queryGroupIds) { + public WorkloadGroupStats nodeStats(Set workloadGroupIds, Boolean requestedBreached) { + final Map statsHolderMap = new HashMap<>(); + Map existingStateMap = workloadGroupsStateAccessor.getWorkloadGroupStateMap(); + if (!workloadGroupIds.contains("_all")) { + for (String id : workloadGroupIds) { if (!existingStateMap.containsKey(id)) { - throw new ResourceNotFoundException("QueryGroup with id " + id + " does not exist"); + throw new ResourceNotFoundException("WorkloadGroup with id " + id + " does not exist"); } } } if (existingStateMap != null) { - existingStateMap.forEach((queryGroupId, currentState) -> { - boolean shouldInclude = queryGroupIds.contains("_all") || queryGroupIds.contains(queryGroupId); + existingStateMap.forEach((workloadGroupId, currentState) -> { + boolean shouldInclude = workloadGroupIds.contains("_all") || workloadGroupIds.contains(workloadGroupId); if (shouldInclude) { - if (requestedBreached == null || requestedBreached == resourceLimitBreached(queryGroupId, currentState)) { - statsHolderMap.put(queryGroupId, QueryGroupStatsHolder.from(currentState)); + if (requestedBreached == null || requestedBreached == resourceLimitBreached(workloadGroupId, currentState)) { + statsHolderMap.put(workloadGroupId, WorkloadGroupStatsHolder.from(currentState)); } } }); } - return new QueryGroupStats(statsHolderMap); + return new WorkloadGroupStats(statsHolderMap); } /** - * @return if the QueryGroup breaches any resource limit based on the LastRecordedUsage + * @return if the WorkloadGroup breaches any resource limit based on the LastRecordedUsage */ - public boolean resourceLimitBreached(String id, QueryGroupState currentState) { - QueryGroup queryGroup = clusterService.state().metadata().queryGroups().get(id); - if (queryGroup == null) { - throw new ResourceNotFoundException("QueryGroup with id " + id + " does not exist"); + public boolean resourceLimitBreached(String id, WorkloadGroupState currentState) { + WorkloadGroup workloadGroup = clusterService.state().metadata().workloadGroups().get(id); + if (workloadGroup == null) { + throw new ResourceNotFoundException("WorkloadGroup with id " + id + " does not exist"); } for (ResourceType resourceType : TRACKED_RESOURCES) { - if (queryGroup.getResourceLimits().containsKey(resourceType)) { - final double threshold = getNormalisedRejectionThreshold(queryGroup.getResourceLimits().get(resourceType), resourceType); + if (workloadGroup.getResourceLimits().containsKey(resourceType)) { + final double threshold = getNormalisedRejectionThreshold(workloadGroup.getResourceLimits().get(resourceType), resourceType); final double lastRecordedUsage = currentState.getResourceState().get(resourceType).getLastRecordedUsage(); if (threshold < lastRecordedUsage) { return true; @@ -250,57 +250,59 @@ public boolean resourceLimitBreached(String id, QueryGroupState currentState) { } /** - * @param queryGroupId query group identifier + * @param workloadGroupId query group identifier */ - public void rejectIfNeeded(String queryGroupId) { + public void rejectIfNeeded(String workloadGroupId) { if (workloadManagementSettings.getWlmMode() != WlmMode.ENABLED) { return; } - if (queryGroupId == null || queryGroupId.equals(QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get())) return; - QueryGroupState queryGroupState = queryGroupsStateAccessor.getQueryGroupState(queryGroupId); + if (workloadGroupId == null || 
workloadGroupId.equals(WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get())) return; + WorkloadGroupState workloadGroupState = workloadGroupsStateAccessor.getWorkloadGroupState(workloadGroupId); // This can happen if the request failed for a deleted query group - // or new queryGroup is being created and has not been acknowledged yet or invalid query group id - if (queryGroupState == null) { + // or new workloadGroup is being created and has not been acknowledged yet or invalid query group id + if (workloadGroupState == null) { return; } - // rejections will not happen for SOFT mode QueryGroups unless node is in duress - Optional optionalQueryGroup = activeQueryGroups.stream().filter(x -> x.get_id().equals(queryGroupId)).findFirst(); + // rejections will not happen for SOFT mode WorkloadGroups unless node is in duress + Optional optionalWorkloadGroup = activeWorkloadGroups.stream() + .filter(x -> x.get_id().equals(workloadGroupId)) + .findFirst(); - if (optionalQueryGroup.isPresent() - && (optionalQueryGroup.get().getResiliencyMode() == MutableQueryGroupFragment.ResiliencyMode.SOFT + if (optionalWorkloadGroup.isPresent() + && (optionalWorkloadGroup.get().getResiliencyMode() == MutableWorkloadGroupFragment.ResiliencyMode.SOFT && !nodeDuressTrackers.isNodeInDuress())) return; - optionalQueryGroup.ifPresent(queryGroup -> { + optionalWorkloadGroup.ifPresent(workloadGroup -> { boolean reject = false; final StringBuilder reason = new StringBuilder(); for (ResourceType resourceType : TRACKED_RESOURCES) { - if (queryGroup.getResourceLimits().containsKey(resourceType)) { + if (workloadGroup.getResourceLimits().containsKey(resourceType)) { final double threshold = getNormalisedRejectionThreshold( - queryGroup.getResourceLimits().get(resourceType), + workloadGroup.getResourceLimits().get(resourceType), resourceType ); - final double lastRecordedUsage = queryGroupState.getResourceState().get(resourceType).getLastRecordedUsage(); + final double lastRecordedUsage = workloadGroupState.getResourceState().get(resourceType).getLastRecordedUsage(); if (threshold < lastRecordedUsage) { reject = true; reason.append(resourceType) - .append(" limit is breaching for ENFORCED type QueryGroup: (") + .append(" limit is breaching for ENFORCED type WorkloadGroup: (") .append(threshold) .append(" < ") .append(lastRecordedUsage) .append("). "); - queryGroupState.getResourceState().get(resourceType).rejections.inc(); + workloadGroupState.getResourceState().get(resourceType).rejections.inc(); // should not double count even if both the resource limits are breaching break; } } } if (reject) { - queryGroupState.totalRejections.inc(); + workloadGroupState.totalRejections.inc(); throw new OpenSearchRejectedExecutionException( - "QueryGroup " + queryGroupId + " is already contended. " + reason.toString() + "WorkloadGroup " + workloadGroupId + " is already contended. 
" + reason.toString() ); } }); @@ -315,45 +317,45 @@ private double getNormalisedRejectionThreshold(double limit, ResourceType resour throw new IllegalArgumentException(resourceType + " is not supported in WLM yet"); } - public Set getActiveQueryGroups() { - return activeQueryGroups; + public Set getActiveWorkloadGroups() { + return activeWorkloadGroups; } - public Set getDeletedQueryGroups() { - return deletedQueryGroups; + public Set getDeletedWorkloadGroups() { + return deletedWorkloadGroups; } /** * This method determines whether the task should be accounted by SBP if both features co-exist - * @param t QueryGroupTask + * @param t WorkloadGroupTask * @return whether or not SBP handle it */ public boolean shouldSBPHandle(Task t) { - QueryGroupTask task = (QueryGroupTask) t; - boolean isInvalidQueryGroupTask = true; - if (task.isQueryGroupSet() && !QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get().equals(task.getQueryGroupId())) { - isInvalidQueryGroupTask = activeQueryGroups.stream() - .noneMatch(queryGroup -> queryGroup.get_id().equals(task.getQueryGroupId())); + WorkloadGroupTask task = (WorkloadGroupTask) t; + boolean isInvalidWorkloadGroupTask = true; + if (task.isWorkloadGroupSet() && !WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get().equals(task.getWorkloadGroupId())) { + isInvalidWorkloadGroupTask = activeWorkloadGroups.stream() + .noneMatch(workloadGroup -> workloadGroup.get_id().equals(task.getWorkloadGroupId())); } - return workloadManagementSettings.getWlmMode() != WlmMode.ENABLED || isInvalidQueryGroupTask; + return workloadManagementSettings.getWlmMode() != WlmMode.ENABLED || isInvalidWorkloadGroupTask; } @Override public void onTaskCompleted(Task task) { - if (!(task instanceof QueryGroupTask) || !((QueryGroupTask) task).isQueryGroupSet()) { + if (!(task instanceof WorkloadGroupTask) || !((WorkloadGroupTask) task).isWorkloadGroupSet()) { return; } - final QueryGroupTask queryGroupTask = (QueryGroupTask) task; - String queryGroupId = queryGroupTask.getQueryGroupId(); + final WorkloadGroupTask workloadGroupTask = (WorkloadGroupTask) task; + String workloadGroupId = workloadGroupTask.getWorkloadGroupId(); - // set the default queryGroupId if not existing in the active query groups - String finalQueryGroupId = queryGroupId; - boolean exists = activeQueryGroups.stream().anyMatch(queryGroup -> queryGroup.get_id().equals(finalQueryGroupId)); + // set the default workloadGroupId if not existing in the active query groups + String finalWorkloadGroupId = workloadGroupId; + boolean exists = activeWorkloadGroups.stream().anyMatch(workloadGroup -> workloadGroup.get_id().equals(finalWorkloadGroupId)); if (!exists) { - queryGroupId = QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get(); + workloadGroupId = WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get(); } - queryGroupsStateAccessor.getQueryGroupState(queryGroupId).totalCompletions.inc(); + workloadGroupsStateAccessor.getWorkloadGroupState(workloadGroupId).totalCompletions.inc(); } } diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java b/server/src/main/java/org/opensearch/wlm/WorkloadGroupTask.java similarity index 54% rename from server/src/main/java/org/opensearch/wlm/QueryGroupTask.java rename to server/src/main/java/org/opensearch/wlm/WorkloadGroupTask.java index c6b7fee3b04c0..636e9178775f9 100644 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java +++ b/server/src/main/java/org/opensearch/wlm/WorkloadGroupTask.java @@ -23,23 +23,23 @@ import static 
org.opensearch.search.SearchService.NO_TIMEOUT; /** - * Base class to define QueryGroup tasks + * Base class to define WorkloadGroup tasks */ @PublicApi(since = "2.18.0") -public class QueryGroupTask extends CancellableTask { +public class WorkloadGroupTask extends CancellableTask { - private static final Logger logger = LogManager.getLogger(QueryGroupTask.class); - public static final String QUERY_GROUP_ID_HEADER = "queryGroupId"; - public static final Supplier DEFAULT_QUERY_GROUP_ID_SUPPLIER = () -> "DEFAULT_QUERY_GROUP"; + private static final Logger logger = LogManager.getLogger(WorkloadGroupTask.class); + public static final String WORKLOAD_GROUP_ID_HEADER = "workloadGroupId"; + public static final Supplier DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER = () -> "DEFAULT_WORKLOAD_GROUP"; private final LongSupplier nanoTimeSupplier; - private String queryGroupId; - private boolean isQueryGroupSet = false; + private String workloadGroupId; + private boolean isWorkloadGroupSet = false; - public QueryGroupTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { + public WorkloadGroupTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { this(id, type, action, description, parentTaskId, headers, NO_TIMEOUT, System::nanoTime); } - public QueryGroupTask( + public WorkloadGroupTask( long id, String type, String action, @@ -51,7 +51,7 @@ public QueryGroupTask( this(id, type, action, description, parentTaskId, headers, cancelAfterTimeInterval, System::nanoTime); } - public QueryGroupTask( + public WorkloadGroupTask( long id, String type, String action, @@ -66,27 +66,27 @@ public QueryGroupTask( } /** - * This method should always be called after calling setQueryGroupId at least once on this object - * @return task queryGroupId + * This method should always be called after calling setWorkloadGroupId at least once on this object + * @return task workloadGroupId */ - public final String getQueryGroupId() { - if (queryGroupId == null) { - logger.warn("QueryGroup _id can't be null, It should be set before accessing it. This is abnormal behaviour "); + public final String getWorkloadGroupId() { + if (workloadGroupId == null) { + logger.warn("WorkloadGroup _id can't be null, It should be set before accessing it. 
This is abnormal behaviour "); } - return queryGroupId; + return workloadGroupId; } /** - * sets the queryGroupId from threadContext into the task itself, - * This method was defined since the queryGroupId can only be evaluated after task creation + * sets the workloadGroupId from threadContext into the task itself, + * This method was defined since the workloadGroupId can only be evaluated after task creation * @param threadContext current threadContext */ - public final void setQueryGroupId(final ThreadContext threadContext) { - isQueryGroupSet = true; - if (threadContext != null && threadContext.getHeader(QUERY_GROUP_ID_HEADER) != null) { - this.queryGroupId = threadContext.getHeader(QUERY_GROUP_ID_HEADER); + public final void setWorkloadGroupId(final ThreadContext threadContext) { + isWorkloadGroupSet = true; + if (threadContext != null && threadContext.getHeader(WORKLOAD_GROUP_ID_HEADER) != null) { + this.workloadGroupId = threadContext.getHeader(WORKLOAD_GROUP_ID_HEADER); } else { - this.queryGroupId = DEFAULT_QUERY_GROUP_ID_SUPPLIER.get(); + this.workloadGroupId = DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get(); } } @@ -94,8 +94,8 @@ public long getElapsedTime() { return nanoTimeSupplier.getAsLong() - getStartTimeNanos(); } - public boolean isQueryGroupSet() { - return isQueryGroupSet; + public boolean isWorkloadGroupSet() { + return isWorkloadGroupSet; } @Override diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupThreadContextStatePropagator.java b/server/src/main/java/org/opensearch/wlm/WorkloadGroupThreadContextStatePropagator.java similarity index 80% rename from server/src/main/java/org/opensearch/wlm/QueryGroupThreadContextStatePropagator.java rename to server/src/main/java/org/opensearch/wlm/WorkloadGroupThreadContextStatePropagator.java index 06d223907082e..d5055c5cc4d5c 100644 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupThreadContextStatePropagator.java +++ b/server/src/main/java/org/opensearch/wlm/WorkloadGroupThreadContextStatePropagator.java @@ -15,11 +15,11 @@ import java.util.Map; /** - * This class is used to propagate QueryGroup related headers to request and nodes + * This class is used to propagate WorkloadGroup related headers to request and nodes */ -public class QueryGroupThreadContextStatePropagator implements ThreadContextStatePropagator { - // TODO: move this constant to QueryGroupService class once the QueryGroup monitoring framework PR is ready - public static List PROPAGATED_HEADERS = List.of("queryGroupId"); +public class WorkloadGroupThreadContextStatePropagator implements ThreadContextStatePropagator { + + public static List PROPAGATED_HEADERS = List.of(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER); /** * @param source current context transient headers diff --git a/server/src/main/java/org/opensearch/wlm/WorkloadGroupsStateAccessor.java b/server/src/main/java/org/opensearch/wlm/WorkloadGroupsStateAccessor.java new file mode 100644 index 0000000000000..103871d1c7fe0 --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/WorkloadGroupsStateAccessor.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm; + +import org.opensearch.wlm.stats.WorkloadGroupState; + +import java.util.HashMap; +import java.util.Map; + +/** + * This class is used to decouple {@link WorkloadGroupService} and {@link org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService} to share the + * {@link WorkloadGroupState}s + */ +public class WorkloadGroupsStateAccessor { + // This map does not need to be concurrent since we will process the cluster state change serially and update + // this map with new additions and deletions of entries. WorkloadGroupState is thread safe + private final Map workloadGroupStateMap; + + public WorkloadGroupsStateAccessor() { + this(new HashMap<>()); + } + + public WorkloadGroupsStateAccessor(Map workloadGroupStateMap) { + this.workloadGroupStateMap = workloadGroupStateMap; + } + + /** + * returns the query groups state + */ + public Map getWorkloadGroupStateMap() { + return workloadGroupStateMap; + } + + /** + * return WorkloadGroupState for the given workloadGroupId + * @param workloadGroupId + * @return WorkloadGroupState for the given workloadGroupId, if id is invalid return default query group state + */ + public WorkloadGroupState getWorkloadGroupState(String workloadGroupId) { + return workloadGroupStateMap.getOrDefault( + workloadGroupId, + workloadGroupStateMap.get(WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get()) + ); + } + + /** + * adds new WorkloadGroupState against given workloadGroupId + * @param workloadGroupId + */ + public void addNewWorkloadGroup(String workloadGroupId) { + this.workloadGroupStateMap.putIfAbsent(workloadGroupId, new WorkloadGroupState()); + } + + /** + * removes WorkloadGroupState against given workloadGroupId + * @param workloadGroupId + */ + public void removeWorkloadGroup(String workloadGroupId) { + this.workloadGroupStateMap.remove(workloadGroupId); + } +} diff --git a/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java b/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java index af25eedd7eed5..35a043a6e42d8 100644 --- a/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java +++ b/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java @@ -36,9 +36,9 @@ public class WorkloadManagementSettings { private Double nodeLevelCpuRejectionThreshold; /** - * Setting name for QueryGroupService node duress streak + * Setting name for WorkloadGroupService node duress streak */ - public static final String QUERYGROUP_DURESS_STREAK_SETTING_NAME = "wlm.query_group.duress_streak"; + public static final String QUERYGROUP_DURESS_STREAK_SETTING_NAME = "wlm.workload_group.duress_streak"; private int duressStreak; public static final Setting QUERYGROUP_SERVICE_DURESS_STREAK_SETTING = Setting.intSetting( QUERYGROUP_DURESS_STREAK_SETTING_NAME, @@ -51,9 +51,9 @@ public class WorkloadManagementSettings { /** * Setting name for Query Group Service run interval */ - public static final String QUERYGROUP_ENFORCEMENT_INTERVAL_SETTING_NAME = "wlm.query_group.enforcement_interval"; + public static final String QUERYGROUP_ENFORCEMENT_INTERVAL_SETTING_NAME = "wlm.workload_group.enforcement_interval"; - private TimeValue queryGroupServiceRunInterval; + private TimeValue workloadGroupServiceRunInterval; /** * Setting to control the run interval of Query Group Service */ @@ -68,7 +68,7 @@ public class WorkloadManagementSettings { /** * WLM mode setting name */ - public static final String WLM_MODE_SETTING_NAME = "wlm.query_group.mode"; + public static final 
String WLM_MODE_SETTING_NAME = "wlm.workload_group.mode"; private volatile WlmMode wlmMode; @@ -84,9 +84,9 @@ public class WorkloadManagementSettings { ); /** - * Setting name for node level memory based rejection threshold for QueryGroup service + * Setting name for node level memory based rejection threshold for WorkloadGroup service */ - public static final String NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME = "wlm.query_group.node.memory_rejection_threshold"; + public static final String NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME = "wlm.workload_group.node.memory_rejection_threshold"; /** * Setting to control the memory based rejection threshold */ @@ -97,9 +97,9 @@ public class WorkloadManagementSettings { Setting.Property.NodeScope ); /** - * Setting name for node level cpu based rejection threshold for QueryGroup service + * Setting name for node level cpu based rejection threshold for WorkloadGroup service */ - public static final String NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME = "wlm.query_group.node.cpu_rejection_threshold"; + public static final String NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME = "wlm.workload_group.node.cpu_rejection_threshold"; /** * Setting to control the cpu based rejection threshold */ @@ -110,9 +110,9 @@ public class WorkloadManagementSettings { Setting.Property.NodeScope ); /** - * Setting name for node level memory based cancellation threshold for QueryGroup service + * Setting name for node level memory based cancellation threshold for WorkloadGroup service */ - public static final String NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME = "wlm.query_group.node.memory_cancellation_threshold"; + public static final String NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME = "wlm.workload_group.node.memory_cancellation_threshold"; /** * Setting to control the memory based cancellation threshold */ @@ -123,9 +123,9 @@ public class WorkloadManagementSettings { Setting.Property.NodeScope ); /** - * Setting name for node level cpu based cancellation threshold for QueryGroup service + * Setting name for node level cpu based cancellation threshold for WorkloadGroup service */ - public static final String NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME = "wlm.query_group.node.cpu_cancellation_threshold"; + public static final String NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME = "wlm.workload_group.node.cpu_cancellation_threshold"; /** * Setting to control the cpu based cancellation threshold */ @@ -137,9 +137,9 @@ public class WorkloadManagementSettings { ); /** - * QueryGroup service settings constructor - * @param settings - QueryGroup service settings - * @param clusterSettings - QueryGroup cluster settings + * WorkloadGroup service settings constructor + * @param settings - WorkloadGroup service settings + * @param clusterSettings - WorkloadGroup cluster settings */ public WorkloadManagementSettings(Settings settings, ClusterSettings clusterSettings) { this.wlmMode = WLM_MODE_SETTING.get(settings); @@ -147,7 +147,7 @@ public WorkloadManagementSettings(Settings settings, ClusterSettings clusterSett nodeLevelMemoryRejectionThreshold = NODE_LEVEL_MEMORY_REJECTION_THRESHOLD.get(settings); nodeLevelCpuCancellationThreshold = NODE_LEVEL_CPU_CANCELLATION_THRESHOLD.get(settings); nodeLevelCpuRejectionThreshold = NODE_LEVEL_CPU_REJECTION_THRESHOLD.get(settings); - this.queryGroupServiceRunInterval = TimeValue.timeValueMillis(QUERYGROUP_SERVICE_RUN_INTERVAL_SETTING.get(settings)); + this.workloadGroupServiceRunInterval = 
TimeValue.timeValueMillis(QUERYGROUP_SERVICE_RUN_INTERVAL_SETTING.get(settings)); duressStreak = QUERYGROUP_SERVICE_DURESS_STREAK_SETTING.get(settings); ensureRejectionThresholdIsLessThanCancellation( @@ -168,7 +168,7 @@ public WorkloadManagementSettings(Settings settings, ClusterSettings clusterSett clusterSettings.addSettingsUpdateConsumer(NODE_LEVEL_CPU_CANCELLATION_THRESHOLD, this::setNodeLevelCpuCancellationThreshold); clusterSettings.addSettingsUpdateConsumer(NODE_LEVEL_CPU_REJECTION_THRESHOLD, this::setNodeLevelCpuRejectionThreshold); clusterSettings.addSettingsUpdateConsumer(WLM_MODE_SETTING, this::setWlmMode); - clusterSettings.addSettingsUpdateConsumer(QUERYGROUP_SERVICE_RUN_INTERVAL_SETTING, this::setQueryGroupServiceRunInterval); + clusterSettings.addSettingsUpdateConsumer(QUERYGROUP_SERVICE_RUN_INTERVAL_SETTING, this::setWorkloadGroupServiceRunInterval); clusterSettings.addSettingsUpdateConsumer(QUERYGROUP_SERVICE_DURESS_STREAK_SETTING, this::setDuressStreak); } @@ -189,19 +189,19 @@ private void setDuressStreak(int duressStreak) { } /** - * queryGroupServiceRunInterval setter + * workloadGroupServiceRunInterval setter * @param newIntervalInMillis new value */ - private void setQueryGroupServiceRunInterval(long newIntervalInMillis) { - this.queryGroupServiceRunInterval = TimeValue.timeValueMillis(newIntervalInMillis); + private void setWorkloadGroupServiceRunInterval(long newIntervalInMillis) { + this.workloadGroupServiceRunInterval = TimeValue.timeValueMillis(newIntervalInMillis); } /** - * queryGroupServiceRunInterval getter - * @return current queryGroupServiceRunInterval value + * workloadGroupServiceRunInterval getter + * @return current workloadGroupServiceRunInterval value */ - public TimeValue getQueryGroupServiceRunInterval() { - return this.queryGroupServiceRunInterval; + public TimeValue getWorkloadGroupServiceRunInterval() { + return this.workloadGroupServiceRunInterval; } /** diff --git a/server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java b/server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java index d382b4c729a38..bb52440e4db34 100644 --- a/server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java +++ b/server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java @@ -16,15 +16,15 @@ import org.opensearch.transport.TransportRequestHandler; /** - * This class is used to intercept search traffic requests and populate the queryGroupId header in task headers + * This class is used to intercept search traffic requests and populate the workloadGroupId header in task headers */ public class WorkloadManagementTransportInterceptor implements TransportInterceptor { private final ThreadPool threadPool; - private final QueryGroupService queryGroupService; + private final WorkloadGroupService workloadGroupService; - public WorkloadManagementTransportInterceptor(final ThreadPool threadPool, final QueryGroupService queryGroupService) { + public WorkloadManagementTransportInterceptor(final ThreadPool threadPool, final WorkloadGroupService workloadGroupService) { this.threadPool = threadPool; - this.queryGroupService = queryGroupService; + this.workloadGroupService = workloadGroupService; } @Override @@ -34,37 +34,37 @@ public TransportRequestHandler interceptHandler( boolean forceExecution, TransportRequestHandler actualHandler ) { - return new RequestHandler(threadPool, actualHandler, queryGroupService); + return new RequestHandler(threadPool, actualHandler, 
workloadGroupService); } /** - * This class is mainly used to populate the queryGroupId header + * This class is mainly used to populate the workloadGroupId header * @param T is Search related request */ public static class RequestHandler implements TransportRequestHandler { private final ThreadPool threadPool; TransportRequestHandler actualHandler; - private final QueryGroupService queryGroupService; + private final WorkloadGroupService workloadGroupService; - public RequestHandler(ThreadPool threadPool, TransportRequestHandler actualHandler, QueryGroupService queryGroupService) { + public RequestHandler(ThreadPool threadPool, TransportRequestHandler actualHandler, WorkloadGroupService workloadGroupService) { this.threadPool = threadPool; this.actualHandler = actualHandler; - this.queryGroupService = queryGroupService; + this.workloadGroupService = workloadGroupService; } @Override public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { if (isSearchWorkloadRequest(task)) { - ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); - final String queryGroupId = ((QueryGroupTask) (task)).getQueryGroupId(); - queryGroupService.rejectIfNeeded(queryGroupId); + ((WorkloadGroupTask) task).setWorkloadGroupId(threadPool.getThreadContext()); + final String workloadGroupId = ((WorkloadGroupTask) (task)).getWorkloadGroupId(); + workloadGroupService.rejectIfNeeded(workloadGroupId); } actualHandler.messageReceived(request, channel, task); } boolean isSearchWorkloadRequest(Task task) { - return task instanceof QueryGroupTask; + return task instanceof WorkloadGroupTask; } } } diff --git a/server/src/main/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategy.java b/server/src/main/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategy.java index ffb326c07e7ac..6788b3da866bf 100644 --- a/server/src/main/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategy.java +++ b/server/src/main/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategy.java @@ -8,8 +8,8 @@ package org.opensearch.wlm.cancellation; -import org.opensearch.wlm.QueryGroupTask; import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.ArrayList; import java.util.Collections; @@ -17,7 +17,7 @@ import java.util.List; import java.util.stream.Collectors; -import static org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService.MIN_VALUE; +import static org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService.MIN_VALUE; /** * Represents the highest resource consuming task first selection strategy. 
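For readers skimming this rename, the selection strategy touched by the next hunk works as follows: tasks are ordered by their usage of the breached resource, highest first, and are selected until the accumulated usage covers the amount that must be reclaimed. Below is a minimal standalone sketch of that loop; the TaskUsage record, the SelectionSketch class name, and the plain doubles are illustrative stand-ins for WorkloadGroupTask and the resource-usage calculator, not part of this patch.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Standalone sketch of the "highest resource consuming task first" selection:
// sort tasks by usage (descending) and keep selecting until the accumulated
// usage exceeds the amount to reclaim.
public class SelectionSketch {
    record TaskUsage(long id, double usage) {} // stand-in for a task plus its measured usage

    static final double MIN_VALUE = 1e-9; // same epsilon the service uses

    static List<TaskUsage> selectForCancellation(List<TaskUsage> tasks, double limit) {
        if (limit < 0) {
            throw new IllegalArgumentException("limit has to be greater than zero");
        }
        List<TaskUsage> sorted = new ArrayList<>(tasks);
        sorted.sort(Comparator.comparingDouble(TaskUsage::usage).reversed());
        List<TaskUsage> selected = new ArrayList<>();
        double accumulated = 0;
        for (TaskUsage task : sorted) {
            selected.add(task);
            accumulated += task.usage();
            if ((accumulated - limit) > MIN_VALUE) {
                break; // enough usage reclaimed, stop selecting
            }
        }
        return selected;
    }

    public static void main(String[] args) {
        List<TaskUsage> tasks = List.of(new TaskUsage(1, 0.05), new TaskUsage(2, 0.30), new TaskUsage(3, 0.10));
        // Needs to free 0.25 of a resource -> picks task 2 (0.30) and stops.
        System.out.println(selectForCancellation(tasks, 0.25));
    }
}

Selecting the largest consumers first keeps the number of cancelled tasks small for a given amount of reclaimed usage, which is the rationale the class javadoc above describes.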
@@ -33,7 +33,7 @@ public MaximumResourceTaskSelectionStrategy() {} * * @return The comparator */ - private Comparator sortingCondition(ResourceType resourceType) { + private Comparator sortingCondition(ResourceType resourceType) { return Comparator.comparingDouble(task -> resourceType.getResourceUsageCalculator().calculateTaskResourceUsage(task)); } @@ -47,7 +47,7 @@ private Comparator sortingCondition(ResourceType resourceType) { * @return The list of selected tasks * @throws IllegalArgumentException If the limit is less than zero */ - public List selectTasksForCancellation(List tasks, double limit, ResourceType resourceType) { + public List selectTasksForCancellation(List tasks, double limit, ResourceType resourceType) { if (limit < 0) { throw new IllegalArgumentException("limit has to be greater than zero"); } @@ -55,11 +55,11 @@ public List selectTasksForCancellation(List task return Collections.emptyList(); } - List sortedTasks = tasks.stream().sorted(sortingCondition(resourceType).reversed()).collect(Collectors.toList()); + List sortedTasks = tasks.stream().sorted(sortingCondition(resourceType).reversed()).collect(Collectors.toList()); - List selectedTasks = new ArrayList<>(); + List selectedTasks = new ArrayList<>(); double accumulated = 0; - for (QueryGroupTask task : sortedTasks) { + for (WorkloadGroupTask task : sortedTasks) { selectedTasks.add(task); accumulated += resourceType.getResourceUsageCalculator().calculateTaskResourceUsage(task); if ((accumulated - limit) > MIN_VALUE) { diff --git a/server/src/main/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationService.java b/server/src/main/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationService.java deleted file mode 100644 index e82a19c5f7af2..0000000000000 --- a/server/src/main/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationService.java +++ /dev/null @@ -1,274 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.wlm.cancellation; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.cluster.metadata.QueryGroup; -import org.opensearch.tasks.TaskCancellation; -import org.opensearch.wlm.MutableQueryGroupFragment.ResiliencyMode; -import org.opensearch.wlm.QueryGroupLevelResourceUsageView; -import org.opensearch.wlm.QueryGroupTask; -import org.opensearch.wlm.QueryGroupsStateAccessor; -import org.opensearch.wlm.ResourceType; -import org.opensearch.wlm.WlmMode; -import org.opensearch.wlm.WorkloadManagementSettings; -import org.opensearch.wlm.stats.QueryGroupState; -import org.opensearch.wlm.tracker.QueryGroupResourceUsageTrackerService; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.BooleanSupplier; -import java.util.function.Consumer; -import java.util.stream.Collectors; - -import static org.opensearch.wlm.tracker.QueryGroupResourceUsageTrackerService.TRACKED_RESOURCES; - -/** - * Manages the cancellation of tasks enforced by QueryGroup thresholds on resource usage criteria. - * This class utilizes a strategy pattern through {@link MaximumResourceTaskSelectionStrategy} to identify tasks that exceed - * predefined resource usage limits and are therefore eligible for cancellation. - * - *
<p>
The cancellation process is initiated by evaluating the resource usage of each QueryGroup against its - * resource limits. Tasks that contribute to exceeding these limits are selected for cancellation based on the - * implemented task selection strategy.
</p>
- * - *
<p>
Instances of this class are configured with a map linking QueryGroup IDs to their corresponding resource usage - * views, a set of active QueryGroups, and a task selection strategy. These components collectively facilitate the - * identification and cancellation of tasks that threaten to breach QueryGroup resource limits.
</p>
- * - * @see MaximumResourceTaskSelectionStrategy - * @see QueryGroup - * @see ResourceType - */ -public class QueryGroupTaskCancellationService { - public static final double MIN_VALUE = 1e-9; - private static final Logger log = LogManager.getLogger(QueryGroupTaskCancellationService.class); - - private final WorkloadManagementSettings workloadManagementSettings; - private final TaskSelectionStrategy taskSelectionStrategy; - private final QueryGroupResourceUsageTrackerService resourceUsageTrackerService; - // a map of QueryGroupId to its corresponding QueryGroupLevelResourceUsageView object - Map queryGroupLevelResourceUsageViews; - private final QueryGroupsStateAccessor queryGroupStateAccessor; - - public QueryGroupTaskCancellationService( - WorkloadManagementSettings workloadManagementSettings, - TaskSelectionStrategy taskSelectionStrategy, - QueryGroupResourceUsageTrackerService resourceUsageTrackerService, - QueryGroupsStateAccessor queryGroupStateAccessor - ) { - this.workloadManagementSettings = workloadManagementSettings; - this.taskSelectionStrategy = taskSelectionStrategy; - this.resourceUsageTrackerService = resourceUsageTrackerService; - this.queryGroupStateAccessor = queryGroupStateAccessor; - } - - /** - * Cancel tasks based on the implemented strategy. - */ - public void cancelTasks( - BooleanSupplier isNodeInDuress, - Collection activeQueryGroups, - Collection deletedQueryGroups - ) { - queryGroupLevelResourceUsageViews = resourceUsageTrackerService.constructQueryGroupLevelUsageViews(); - // cancel tasks from QueryGroups that are in Enforced mode that are breaching their resource limits - cancelTasks(ResiliencyMode.ENFORCED, activeQueryGroups); - // if the node is in duress, cancel tasks accordingly. - handleNodeDuress(isNodeInDuress, activeQueryGroups, deletedQueryGroups); - - updateResourceUsageInQueryGroupState(activeQueryGroups); - } - - private void updateResourceUsageInQueryGroupState(Collection activeQueryGroups) { - Set isSearchWorkloadRunning = new HashSet<>(); - for (Map.Entry queryGroupLevelResourceUsageViewEntry : queryGroupLevelResourceUsageViews - .entrySet()) { - isSearchWorkloadRunning.add(queryGroupLevelResourceUsageViewEntry.getKey()); - QueryGroupState queryGroupState = getQueryGroupState(queryGroupLevelResourceUsageViewEntry.getKey()); - TRACKED_RESOURCES.forEach(resourceType -> { - final double currentUsage = queryGroupLevelResourceUsageViewEntry.getValue().getResourceUsageData().get(resourceType); - queryGroupState.getResourceState().get(resourceType).setLastRecordedUsage(currentUsage); - }); - } - - activeQueryGroups.forEach(queryGroup -> { - if (!isSearchWorkloadRunning.contains(queryGroup.get_id())) { - TRACKED_RESOURCES.forEach( - resourceType -> getQueryGroupState(queryGroup.get_id()).getResourceState().get(resourceType).setLastRecordedUsage(0.0) - ); - } - }); - } - - private void handleNodeDuress( - BooleanSupplier isNodeInDuress, - Collection activeQueryGroups, - Collection deletedQueryGroups - ) { - if (!isNodeInDuress.getAsBoolean()) { - return; - } - // List of tasks to be executed in order if the node is in duress - List> duressActions = List.of( - v -> cancelTasksFromDeletedQueryGroups(deletedQueryGroups), - v -> cancelTasks(ResiliencyMode.SOFT, activeQueryGroups) - ); - - for (Consumer duressAction : duressActions) { - if (!isNodeInDuress.getAsBoolean()) { - break; - } - duressAction.accept(null); - } - } - - private void cancelTasksFromDeletedQueryGroups(Collection deletedQueryGroups) { - 
cancelTasks(getAllCancellableTasks(deletedQueryGroups)); - } - - /** - * Get all cancellable tasks from the QueryGroups. - * - * @return List of tasks that can be cancelled - */ - List getAllCancellableTasks(ResiliencyMode resiliencyMode, Collection queryGroups) { - return getAllCancellableTasks( - queryGroups.stream().filter(queryGroup -> queryGroup.getResiliencyMode() == resiliencyMode).collect(Collectors.toList()) - ); - } - - /** - * Get all cancellable tasks from the given QueryGroups. - * - * @return List of tasks that can be cancelled - */ - List getAllCancellableTasks(Collection queryGroups) { - List taskCancellations = new ArrayList<>(); - final List onCancelCallbacks = new ArrayList<>(); - for (QueryGroup queryGroup : queryGroups) { - final List reasons = new ArrayList<>(); - List selectedTasks = new ArrayList<>(); - for (ResourceType resourceType : TRACKED_RESOURCES) { - // We need to consider the already selected tasks since those tasks also consumed the resources - double excessUsage = getExcessUsage(queryGroup, resourceType) - resourceType.getResourceUsageCalculator() - .calculateResourceUsage(selectedTasks); - if (excessUsage > MIN_VALUE) { - reasons.add(new TaskCancellation.Reason(generateReasonString(queryGroup, resourceType), 1)); - onCancelCallbacks.add(this.getResourceTypeOnCancelCallback(queryGroup.get_id(), resourceType)); - // Only add tasks not already added to avoid double cancellations - selectedTasks.addAll( - taskSelectionStrategy.selectTasksForCancellation(getTasksFor(queryGroup), excessUsage, resourceType) - .stream() - .filter(x -> selectedTasks.stream().noneMatch(y -> x.getId() != y.getId())) - .collect(Collectors.toList()) - ); - } - } - - if (!reasons.isEmpty()) { - onCancelCallbacks.add(getQueryGroupState(queryGroup.get_id()).totalCancellations::inc); - taskCancellations.addAll( - selectedTasks.stream().map(task -> new TaskCancellation(task, reasons, onCancelCallbacks)).collect(Collectors.toList()) - ); - } - } - return taskCancellations; - } - - private String generateReasonString(QueryGroup queryGroup, ResourceType resourceType) { - final double currentUsage = getCurrentUsage(queryGroup, resourceType); - return "QueryGroup ID : " - + queryGroup.get_id() - + " breached the resource limit: (" - + currentUsage - + " > " - + queryGroup.getResourceLimits().get(resourceType) - + ") for resource type : " - + resourceType.getName(); - } - - private List getTasksFor(QueryGroup queryGroup) { - return queryGroupLevelResourceUsageViews.get(queryGroup.get_id()).getActiveTasks(); - } - - private void cancelTasks(ResiliencyMode resiliencyMode, Collection queryGroups) { - cancelTasks(getAllCancellableTasks(resiliencyMode, queryGroups)); - } - - private void cancelTasks(List cancellableTasks) { - - Consumer cancellationLoggingConsumer = (taskCancellation -> { - log.warn( - "Task {} is eligible for cancellation for reason {}", - taskCancellation.getTask().getId(), - taskCancellation.getReasonString() - ); - }); - Consumer cancellationConsumer = cancellationLoggingConsumer; - if (workloadManagementSettings.getWlmMode() == WlmMode.ENABLED) { - cancellationConsumer = (taskCancellation -> { - cancellationLoggingConsumer.accept(taskCancellation); - taskCancellation.cancel(); - }); - } - cancellableTasks.forEach(cancellationConsumer); - } - - private double getExcessUsage(QueryGroup queryGroup, ResourceType resourceType) { - if (queryGroup.getResourceLimits().get(resourceType) == null - || !queryGroupLevelResourceUsageViews.containsKey(queryGroup.get_id())) { - return 0; - } - 
return getCurrentUsage(queryGroup, resourceType) - getNormalisedThreshold(queryGroup, resourceType); - } - - private double getCurrentUsage(QueryGroup queryGroup, ResourceType resourceType) { - final QueryGroupLevelResourceUsageView queryGroupResourceUsageView = queryGroupLevelResourceUsageViews.get(queryGroup.get_id()); - return queryGroupResourceUsageView.getResourceUsageData().get(resourceType); - } - - /** - * normalises configured value with respect to node level cancellation thresholds - * @param queryGroup instance - * @return normalised value with respect to node level cancellation thresholds - */ - private double getNormalisedThreshold(QueryGroup queryGroup, ResourceType resourceType) { - double nodeLevelCancellationThreshold = resourceType.getNodeLevelThreshold(workloadManagementSettings); - return queryGroup.getResourceLimits().get(resourceType) * nodeLevelCancellationThreshold; - } - - private Runnable getResourceTypeOnCancelCallback(String queryGroupId, ResourceType resourceType) { - QueryGroupState queryGroupState = getQueryGroupState(queryGroupId); - return queryGroupState.getResourceState().get(resourceType).cancellations::inc; - } - - private QueryGroupState getQueryGroupState(String queryGroupId) { - assert queryGroupId != null : "queryGroupId should never be null at this point."; - - return queryGroupStateAccessor.getQueryGroupState(queryGroupId); - } - - /** - * Removes the queryGroups from deleted list if it doesn't have any tasks running - */ - public void pruneDeletedQueryGroups(Collection deletedQueryGroups) { - List currentDeletedQueryGroups = new ArrayList<>(deletedQueryGroups); - for (QueryGroup queryGroup : currentDeletedQueryGroups) { - if (queryGroupLevelResourceUsageViews.get(queryGroup.get_id()).getActiveTasks().isEmpty()) { - deletedQueryGroups.remove(queryGroup); - } - } - } -} diff --git a/server/src/main/java/org/opensearch/wlm/cancellation/TaskSelectionStrategy.java b/server/src/main/java/org/opensearch/wlm/cancellation/TaskSelectionStrategy.java index 63fbf9b791a33..c0dbc35349976 100644 --- a/server/src/main/java/org/opensearch/wlm/cancellation/TaskSelectionStrategy.java +++ b/server/src/main/java/org/opensearch/wlm/cancellation/TaskSelectionStrategy.java @@ -8,8 +8,8 @@ package org.opensearch.wlm.cancellation; -import org.opensearch.wlm.QueryGroupTask; import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.List; @@ -24,5 +24,5 @@ public interface TaskSelectionStrategy { * @param resourceType * @return list of tasks */ - List selectTasksForCancellation(List tasks, double limit, ResourceType resourceType); + List selectTasksForCancellation(List tasks, double limit, ResourceType resourceType); } diff --git a/server/src/main/java/org/opensearch/wlm/cancellation/WorkloadGroupTaskCancellationService.java b/server/src/main/java/org/opensearch/wlm/cancellation/WorkloadGroupTaskCancellationService.java new file mode 100644 index 0000000000000..ce5d6728b5633 --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/cancellation/WorkloadGroupTaskCancellationService.java @@ -0,0 +1,282 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm.cancellation; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.metadata.WorkloadGroup; +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.wlm.MutableWorkloadGroupFragment.ResiliencyMode; +import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WlmMode; +import org.opensearch.wlm.WorkloadGroupLevelResourceUsageView; +import org.opensearch.wlm.WorkloadGroupTask; +import org.opensearch.wlm.WorkloadGroupsStateAccessor; +import org.opensearch.wlm.WorkloadManagementSettings; +import org.opensearch.wlm.stats.WorkloadGroupState; +import org.opensearch.wlm.tracker.WorkloadGroupResourceUsageTrackerService; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +import static org.opensearch.wlm.tracker.WorkloadGroupResourceUsageTrackerService.TRACKED_RESOURCES; + +/** + * Manages the cancellation of tasks enforced by WorkloadGroup thresholds on resource usage criteria. + * This class utilizes a strategy pattern through {@link MaximumResourceTaskSelectionStrategy} to identify tasks that exceed + * predefined resource usage limits and are therefore eligible for cancellation. + * + *

The cancellation process is initiated by evaluating the resource usage of each WorkloadGroup against its + * resource limits. Tasks that contribute to exceeding these limits are selected for cancellation based on the + * implemented task selection strategy.

+ * + *

Instances of this class are configured with a map linking WorkloadGroup IDs to their corresponding resource usage + * views, a set of active WorkloadGroups, and a task selection strategy. These components collectively facilitate the + * identification and cancellation of tasks that threaten to breach WorkloadGroup resource limits.
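As a rough illustration of the task selection strategy referenced above, the sketch below shows a hypothetical greedy implementation of the TaskSelectionStrategy interface introduced earlier in this patch. It is not the MaximumResourceTaskSelectionStrategy shipped with this change: only the interface signature, WorkloadGroupTask, ResourceType, and the per-task usage calculator call are taken from the patch; the class name and the sort-by-usage heuristic are assumptions made for illustration.

package org.opensearch.wlm.cancellation;

import org.opensearch.wlm.ResourceType;
import org.opensearch.wlm.WorkloadGroupTask;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Hypothetical greedy strategy: pick the most expensive tasks first until the
// estimated reclaimed usage covers the excess passed in as "limit".
public class GreedyTaskSelectionStrategy implements TaskSelectionStrategy {

    @Override
    public List<WorkloadGroupTask> selectTasksForCancellation(List<WorkloadGroupTask> tasks, double limit, ResourceType resourceType) {
        // Order tasks by their individual usage of the breached resource, highest first.
        List<WorkloadGroupTask> sorted = new ArrayList<>(tasks);
        sorted.sort(
            Comparator.comparingDouble(
                (WorkloadGroupTask task) -> resourceType.getResourceUsageCalculator().calculateTaskResourceUsage(task)
            ).reversed()
        );

        List<WorkloadGroupTask> selected = new ArrayList<>();
        double reclaimed = 0.0;
        for (WorkloadGroupTask task : sorted) {
            if (reclaimed >= limit) {
                break; // enough usage reclaimed to bring the group back under its threshold
            }
            selected.add(task);
            reclaimed += resourceType.getResourceUsageCalculator().calculateTaskResourceUsage(task);
        }
        return selected;
    }
}

A real strategy would likely also weigh task age or progress before cancelling; the greedy ordering here is only meant to show where the interface fits into the cancellation flow described above.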

+ * + * @see MaximumResourceTaskSelectionStrategy + * @see WorkloadGroup + * @see ResourceType + */ +public class WorkloadGroupTaskCancellationService { + public static final double MIN_VALUE = 1e-9; + private static final Logger log = LogManager.getLogger(WorkloadGroupTaskCancellationService.class); + + private final WorkloadManagementSettings workloadManagementSettings; + private final TaskSelectionStrategy taskSelectionStrategy; + private final WorkloadGroupResourceUsageTrackerService resourceUsageTrackerService; + // a map of WorkloadGroupId to its corresponding WorkloadGroupLevelResourceUsageView object + Map workloadGroupLevelResourceUsageViews; + private final WorkloadGroupsStateAccessor workloadGroupStateAccessor; + + public WorkloadGroupTaskCancellationService( + WorkloadManagementSettings workloadManagementSettings, + TaskSelectionStrategy taskSelectionStrategy, + WorkloadGroupResourceUsageTrackerService resourceUsageTrackerService, + WorkloadGroupsStateAccessor workloadGroupStateAccessor + ) { + this.workloadManagementSettings = workloadManagementSettings; + this.taskSelectionStrategy = taskSelectionStrategy; + this.resourceUsageTrackerService = resourceUsageTrackerService; + this.workloadGroupStateAccessor = workloadGroupStateAccessor; + } + + /** + * Cancel tasks based on the implemented strategy. + */ + public void cancelTasks( + BooleanSupplier isNodeInDuress, + Collection activeWorkloadGroups, + Collection deletedWorkloadGroups + ) { + workloadGroupLevelResourceUsageViews = resourceUsageTrackerService.constructWorkloadGroupLevelUsageViews(); + // cancel tasks from WorkloadGroups that are in Enforced mode that are breaching their resource limits + cancelTasks(ResiliencyMode.ENFORCED, activeWorkloadGroups); + // if the node is in duress, cancel tasks accordingly. 
+ handleNodeDuress(isNodeInDuress, activeWorkloadGroups, deletedWorkloadGroups); + + updateResourceUsageInWorkloadGroupState(activeWorkloadGroups); + } + + private void updateResourceUsageInWorkloadGroupState(Collection activeWorkloadGroups) { + Set isSearchWorkloadRunning = new HashSet<>(); + for (Map.Entry< + String, + WorkloadGroupLevelResourceUsageView> workloadGroupLevelResourceUsageViewEntry : workloadGroupLevelResourceUsageViews + .entrySet()) { + isSearchWorkloadRunning.add(workloadGroupLevelResourceUsageViewEntry.getKey()); + WorkloadGroupState workloadGroupState = getWorkloadGroupState(workloadGroupLevelResourceUsageViewEntry.getKey()); + TRACKED_RESOURCES.forEach(resourceType -> { + final double currentUsage = workloadGroupLevelResourceUsageViewEntry.getValue().getResourceUsageData().get(resourceType); + workloadGroupState.getResourceState().get(resourceType).setLastRecordedUsage(currentUsage); + }); + } + + activeWorkloadGroups.forEach(workloadGroup -> { + if (!isSearchWorkloadRunning.contains(workloadGroup.get_id())) { + TRACKED_RESOURCES.forEach( + resourceType -> getWorkloadGroupState(workloadGroup.get_id()).getResourceState() + .get(resourceType) + .setLastRecordedUsage(0.0) + ); + } + }); + } + + private void handleNodeDuress( + BooleanSupplier isNodeInDuress, + Collection activeWorkloadGroups, + Collection deletedWorkloadGroups + ) { + if (!isNodeInDuress.getAsBoolean()) { + return; + } + // List of tasks to be executed in order if the node is in duress + List> duressActions = List.of( + v -> cancelTasksFromDeletedWorkloadGroups(deletedWorkloadGroups), + v -> cancelTasks(ResiliencyMode.SOFT, activeWorkloadGroups) + ); + + for (Consumer duressAction : duressActions) { + if (!isNodeInDuress.getAsBoolean()) { + break; + } + duressAction.accept(null); + } + } + + private void cancelTasksFromDeletedWorkloadGroups(Collection deletedWorkloadGroups) { + cancelTasks(getAllCancellableTasks(deletedWorkloadGroups)); + } + + /** + * Get all cancellable tasks from the WorkloadGroups. + * + * @return List of tasks that can be cancelled + */ + List getAllCancellableTasks(ResiliencyMode resiliencyMode, Collection workloadGroups) { + return getAllCancellableTasks( + workloadGroups.stream() + .filter(workloadGroup -> workloadGroup.getResiliencyMode() == resiliencyMode) + .collect(Collectors.toList()) + ); + } + + /** + * Get all cancellable tasks from the given WorkloadGroups. 
+ * + * @return List of tasks that can be cancelled + */ + List getAllCancellableTasks(Collection workloadGroups) { + List taskCancellations = new ArrayList<>(); + final List onCancelCallbacks = new ArrayList<>(); + for (WorkloadGroup workloadGroup : workloadGroups) { + final List reasons = new ArrayList<>(); + List selectedTasks = new ArrayList<>(); + for (ResourceType resourceType : TRACKED_RESOURCES) { + // We need to consider the already selected tasks since those tasks also consumed the resources + double excessUsage = getExcessUsage(workloadGroup, resourceType) - resourceType.getResourceUsageCalculator() + .calculateResourceUsage(selectedTasks); + if (excessUsage > MIN_VALUE) { + reasons.add(new TaskCancellation.Reason(generateReasonString(workloadGroup, resourceType), 1)); + onCancelCallbacks.add(this.getResourceTypeOnCancelCallback(workloadGroup.get_id(), resourceType)); + // Only add tasks not already added to avoid double cancellations + selectedTasks.addAll( + taskSelectionStrategy.selectTasksForCancellation(getTasksFor(workloadGroup), excessUsage, resourceType) + .stream() + .filter(x -> selectedTasks.stream().noneMatch(y -> x.getId() != y.getId())) + .collect(Collectors.toList()) + ); + } + } + + if (!reasons.isEmpty()) { + onCancelCallbacks.add(getWorkloadGroupState(workloadGroup.get_id()).totalCancellations::inc); + taskCancellations.addAll( + selectedTasks.stream().map(task -> new TaskCancellation(task, reasons, onCancelCallbacks)).collect(Collectors.toList()) + ); + } + } + return taskCancellations; + } + + private String generateReasonString(WorkloadGroup workloadGroup, ResourceType resourceType) { + final double currentUsage = getCurrentUsage(workloadGroup, resourceType); + return "WorkloadGroup ID : " + + workloadGroup.get_id() + + " breached the resource limit: (" + + currentUsage + + " > " + + workloadGroup.getResourceLimits().get(resourceType) + + ") for resource type : " + + resourceType.getName(); + } + + private List getTasksFor(WorkloadGroup workloadGroup) { + return workloadGroupLevelResourceUsageViews.get(workloadGroup.get_id()).getActiveTasks(); + } + + private void cancelTasks(ResiliencyMode resiliencyMode, Collection workloadGroups) { + cancelTasks(getAllCancellableTasks(resiliencyMode, workloadGroups)); + } + + private void cancelTasks(List cancellableTasks) { + + Consumer cancellationLoggingConsumer = (taskCancellation -> { + log.warn( + "Task {} is eligible for cancellation for reason {}", + taskCancellation.getTask().getId(), + taskCancellation.getReasonString() + ); + }); + Consumer cancellationConsumer = cancellationLoggingConsumer; + if (workloadManagementSettings.getWlmMode() == WlmMode.ENABLED) { + cancellationConsumer = (taskCancellation -> { + cancellationLoggingConsumer.accept(taskCancellation); + taskCancellation.cancel(); + }); + } + cancellableTasks.forEach(cancellationConsumer); + } + + private double getExcessUsage(WorkloadGroup workloadGroup, ResourceType resourceType) { + if (workloadGroup.getResourceLimits().get(resourceType) == null + || !workloadGroupLevelResourceUsageViews.containsKey(workloadGroup.get_id())) { + return 0; + } + return getCurrentUsage(workloadGroup, resourceType) - getNormalisedThreshold(workloadGroup, resourceType); + } + + private double getCurrentUsage(WorkloadGroup workloadGroup, ResourceType resourceType) { + final WorkloadGroupLevelResourceUsageView workloadGroupResourceUsageView = workloadGroupLevelResourceUsageViews.get( + workloadGroup.get_id() + ); + return 
workloadGroupResourceUsageView.getResourceUsageData().get(resourceType); + } + + /** + * normalises configured value with respect to node level cancellation thresholds + * @param workloadGroup instance + * @return normalised value with respect to node level cancellation thresholds + */ + private double getNormalisedThreshold(WorkloadGroup workloadGroup, ResourceType resourceType) { + double nodeLevelCancellationThreshold = resourceType.getNodeLevelThreshold(workloadManagementSettings); + return workloadGroup.getResourceLimits().get(resourceType) * nodeLevelCancellationThreshold; + } + + private Runnable getResourceTypeOnCancelCallback(String workloadGroupId, ResourceType resourceType) { + WorkloadGroupState workloadGroupState = getWorkloadGroupState(workloadGroupId); + return workloadGroupState.getResourceState().get(resourceType).cancellations::inc; + } + + private WorkloadGroupState getWorkloadGroupState(String workloadGroupId) { + assert workloadGroupId != null : "workloadGroupId should never be null at this point."; + + return workloadGroupStateAccessor.getWorkloadGroupState(workloadGroupId); + } + + /** + * Removes the workloadGroups from deleted list if it doesn't have any tasks running + */ + public void pruneDeletedWorkloadGroups(Collection deletedWorkloadGroups) { + List currentDeletedWorkloadGroups = new ArrayList<>(deletedWorkloadGroups); + for (WorkloadGroup workloadGroup : currentDeletedWorkloadGroups) { + if (workloadGroupLevelResourceUsageViews.get(workloadGroup.get_id()).getActiveTasks().isEmpty()) { + deletedWorkloadGroups.remove(workloadGroup); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/wlm/listeners/QueryGroupRequestOperationListener.java b/server/src/main/java/org/opensearch/wlm/listeners/WorkloadGroupRequestOperationListener.java similarity index 50% rename from server/src/main/java/org/opensearch/wlm/listeners/QueryGroupRequestOperationListener.java rename to server/src/main/java/org/opensearch/wlm/listeners/WorkloadGroupRequestOperationListener.java index a2ce2b57bfe0f..14f112ab7ff43 100644 --- a/server/src/main/java/org/opensearch/wlm/listeners/QueryGroupRequestOperationListener.java +++ b/server/src/main/java/org/opensearch/wlm/listeners/WorkloadGroupRequestOperationListener.java @@ -12,35 +12,35 @@ import org.opensearch.action.search.SearchRequestContext; import org.opensearch.action.search.SearchRequestOperationsListener; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.wlm.QueryGroupService; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupService; +import org.opensearch.wlm.WorkloadGroupTask; /** - * This listener is used to listen for request lifecycle events for a queryGroup + * This listener is used to listen for request lifecycle events for a workloadGroup */ -public class QueryGroupRequestOperationListener extends SearchRequestOperationsListener { +public class WorkloadGroupRequestOperationListener extends SearchRequestOperationsListener { - private final QueryGroupService queryGroupService; + private final WorkloadGroupService workloadGroupService; private final ThreadPool threadPool; - public QueryGroupRequestOperationListener(QueryGroupService queryGroupService, ThreadPool threadPool) { - this.queryGroupService = queryGroupService; + public WorkloadGroupRequestOperationListener(WorkloadGroupService workloadGroupService, ThreadPool threadPool) { + this.workloadGroupService = workloadGroupService; this.threadPool = threadPool; } /** - * This method assumes that the queryGroupId is 
already populated in the thread context + * This method assumes that the workloadGroupId is already populated in the thread context * @param searchRequestContext SearchRequestContext instance */ @Override protected void onRequestStart(SearchRequestContext searchRequestContext) { - final String queryGroupId = threadPool.getThreadContext().getHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER); - queryGroupService.rejectIfNeeded(queryGroupId); + final String workloadGroupId = threadPool.getThreadContext().getHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER); + workloadGroupService.rejectIfNeeded(workloadGroupId); } @Override protected void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) { - final String queryGroupId = threadPool.getThreadContext().getHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER); - queryGroupService.incrementFailuresFor(queryGroupId); + final String workloadGroupId = threadPool.getThreadContext().getHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER); + workloadGroupService.incrementFailuresFor(workloadGroupId); } } diff --git a/server/src/main/java/org/opensearch/wlm/stats/WlmStats.java b/server/src/main/java/org/opensearch/wlm/stats/WlmStats.java index 3313021bfae52..f729643674c79 100644 --- a/server/src/main/java/org/opensearch/wlm/stats/WlmStats.java +++ b/server/src/main/java/org/opensearch/wlm/stats/WlmStats.java @@ -23,27 +23,27 @@ * This class contains the stats for Workload Management */ public class WlmStats extends BaseNodeResponse implements ToXContentObject, Writeable { - private final QueryGroupStats queryGroupStats; + private final WorkloadGroupStats workloadGroupStats; - public WlmStats(DiscoveryNode node, QueryGroupStats queryGroupStats) { + public WlmStats(DiscoveryNode node, WorkloadGroupStats workloadGroupStats) { super(node); - this.queryGroupStats = queryGroupStats; + this.workloadGroupStats = workloadGroupStats; } public WlmStats(StreamInput in) throws IOException { super(in); - queryGroupStats = new QueryGroupStats(in); + workloadGroupStats = new WorkloadGroupStats(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - queryGroupStats.writeTo(out); + workloadGroupStats.writeTo(out); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return queryGroupStats.toXContent(builder, params); + return workloadGroupStats.toXContent(builder, params); } @Override @@ -51,15 +51,15 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; WlmStats that = (WlmStats) o; - return Objects.equals(getQueryGroupStats(), that.getQueryGroupStats()); + return Objects.equals(getWorkloadGroupStats(), that.getWorkloadGroupStats()); } @Override public int hashCode() { - return Objects.hash(queryGroupStats); + return Objects.hash(workloadGroupStats); } - public QueryGroupStats getQueryGroupStats() { - return queryGroupStats; + public WorkloadGroupStats getWorkloadGroupStats() { + return workloadGroupStats; } } diff --git a/server/src/main/java/org/opensearch/wlm/stats/QueryGroupState.java b/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupState.java similarity index 97% rename from server/src/main/java/org/opensearch/wlm/stats/QueryGroupState.java rename to server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupState.java index a082ed159e829..78cb4521aff18 100644 --- a/server/src/main/java/org/opensearch/wlm/stats/QueryGroupState.java +++ 
b/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupState.java @@ -17,7 +17,7 @@ /** * This class will keep the point in time view of the query group stats */ -public class QueryGroupState { +public class WorkloadGroupState { /** * co-ordinator level completions at the query group level, this is a cumulative counter since the Opensearch start time */ @@ -43,7 +43,7 @@ public class QueryGroupState { */ private final Map resourceState; - public QueryGroupState() { + public WorkloadGroupState() { resourceState = new EnumMap<>(ResourceType.class); for (ResourceType resourceType : ResourceType.values()) { if (resourceType.hasStatsEnabled()) { diff --git a/server/src/main/java/org/opensearch/wlm/stats/QueryGroupStats.java b/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupStats.java similarity index 77% rename from server/src/main/java/org/opensearch/wlm/stats/QueryGroupStats.java rename to server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupStats.java index 42ce3ac0019db..68ce981d4e0c5 100644 --- a/server/src/main/java/org/opensearch/wlm/stats/QueryGroupStats.java +++ b/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupStats.java @@ -14,7 +14,7 @@ import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.wlm.ResourceType; -import org.opensearch.wlm.stats.QueryGroupState.ResourceTypeState; +import org.opensearch.wlm.stats.WorkloadGroupState.ResourceTypeState; import java.io.IOException; import java.util.ArrayList; @@ -25,7 +25,7 @@ /** * { - * "queryGroupID": { + * "workloadGroupID": { * "completions": 1233234234, * "rejections": 12, * "failures": 97, @@ -37,32 +37,32 @@ * ... * } */ -public class QueryGroupStats implements ToXContentObject, Writeable { - private final Map stats; +public class WorkloadGroupStats implements ToXContentObject, Writeable { + private final Map stats; - public QueryGroupStats(Map stats) { + public WorkloadGroupStats(Map stats) { this.stats = stats; } - public QueryGroupStats(StreamInput in) throws IOException { - stats = in.readMap(StreamInput::readString, QueryGroupStatsHolder::new); + public WorkloadGroupStats(StreamInput in) throws IOException { + stats = in.readMap(StreamInput::readString, WorkloadGroupStatsHolder::new); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(stats, StreamOutput::writeString, QueryGroupStatsHolder::writeTo); + out.writeMap(stats, StreamOutput::writeString, WorkloadGroupStatsHolder::writeTo); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject("query_groups"); + builder.startObject("workload_groups"); // to keep the toXContent consistent - List> entryList = new ArrayList<>(stats.entrySet()); + List> entryList = new ArrayList<>(stats.entrySet()); entryList.sort((k1, k2) -> k1.getKey().compareTo(k2.getKey())); - for (Map.Entry queryGroupStats : entryList) { - builder.startObject(queryGroupStats.getKey()); - queryGroupStats.getValue().toXContent(builder, params); + for (Map.Entry workloadGroupStats : entryList) { + builder.startObject(workloadGroupStats.getKey()); + workloadGroupStats.getValue().toXContent(builder, params); builder.endObject(); } builder.endObject(); @@ -73,7 +73,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - QueryGroupStats that = (QueryGroupStats) 
o; + WorkloadGroupStats that = (WorkloadGroupStats) o; return Objects.equals(stats, that.stats); } @@ -82,7 +82,7 @@ public int hashCode() { return Objects.hash(stats); } - public Map getStats() { + public Map getStats() { return stats; } @@ -90,7 +90,7 @@ public Map getStats() { * This is a stats holder object which will hold the data for a query group at a point in time * the instance will only be created on demand through stats api */ - public static class QueryGroupStatsHolder implements ToXContentObject, Writeable { + public static class WorkloadGroupStatsHolder implements ToXContentObject, Writeable { public static final String COMPLETIONS = "total_completions"; public static final String REJECTIONS = "total_rejections"; public static final String TOTAL_CANCELLATIONS = "total_cancellations"; @@ -102,9 +102,9 @@ public static class QueryGroupStatsHolder implements ToXContentObject, Writeable private Map resourceStats; // this is needed to support the factory method - public QueryGroupStatsHolder() {} + public WorkloadGroupStatsHolder() {} - public QueryGroupStatsHolder( + public WorkloadGroupStatsHolder( long completions, long rejections, long failures, @@ -118,7 +118,7 @@ public QueryGroupStatsHolder( this.resourceStats = resourceStats; } - public QueryGroupStatsHolder(StreamInput in) throws IOException { + public WorkloadGroupStatsHolder(StreamInput in) throws IOException { this.completions = in.readVLong(); this.rejections = in.readVLong(); this.failures = in.readVLong(); @@ -127,23 +127,23 @@ public QueryGroupStatsHolder(StreamInput in) throws IOException { } /** - * static factory method to convert {@link QueryGroupState} into {@link QueryGroupStatsHolder} - * @param queryGroupState which needs to be converted - * @return QueryGroupStatsHolder object + * static factory method to convert {@link WorkloadGroupState} into {@link WorkloadGroupStatsHolder} + * @param workloadGroupState which needs to be converted + * @return WorkloadGroupStatsHolder object */ - public static QueryGroupStatsHolder from(QueryGroupState queryGroupState) { - final QueryGroupStatsHolder statsHolder = new QueryGroupStatsHolder(); + public static WorkloadGroupStatsHolder from(WorkloadGroupState workloadGroupState) { + final WorkloadGroupStatsHolder statsHolder = new WorkloadGroupStatsHolder(); Map resourceStatsMap = new HashMap<>(); - for (Map.Entry resourceTypeStateEntry : queryGroupState.getResourceState().entrySet()) { + for (Map.Entry resourceTypeStateEntry : workloadGroupState.getResourceState().entrySet()) { resourceStatsMap.put(resourceTypeStateEntry.getKey(), ResourceStats.from(resourceTypeStateEntry.getValue())); } - statsHolder.completions = queryGroupState.getTotalCompletions(); - statsHolder.rejections = queryGroupState.getTotalRejections(); - statsHolder.failures = queryGroupState.getFailures(); - statsHolder.cancellations = queryGroupState.getTotalCancellations(); + statsHolder.completions = workloadGroupState.getTotalCompletions(); + statsHolder.rejections = workloadGroupState.getTotalRejections(); + statsHolder.failures = workloadGroupState.getFailures(); + statsHolder.cancellations = workloadGroupState.getTotalCancellations(); statsHolder.resourceStats = resourceStatsMap; return statsHolder; } @@ -151,10 +151,10 @@ public static QueryGroupStatsHolder from(QueryGroupState queryGroupState) { /** * Writes the @param {statsHolder} to @param {out} * @param out StreamOutput - * @param statsHolder QueryGroupStatsHolder + * @param statsHolder WorkloadGroupStatsHolder * @throws IOException exception 
*/ - public static void writeTo(StreamOutput out, QueryGroupStatsHolder statsHolder) throws IOException { + public static void writeTo(StreamOutput out, WorkloadGroupStatsHolder statsHolder) throws IOException { out.writeVLong(statsHolder.completions); out.writeVLong(statsHolder.rejections); out.writeVLong(statsHolder.failures); @@ -164,7 +164,7 @@ public static void writeTo(StreamOutput out, QueryGroupStatsHolder statsHolder) @Override public void writeTo(StreamOutput out) throws IOException { - QueryGroupStatsHolder.writeTo(out, this); + WorkloadGroupStatsHolder.writeTo(out, this); } @Override @@ -189,7 +189,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - QueryGroupStatsHolder that = (QueryGroupStatsHolder) o; + WorkloadGroupStatsHolder that = (WorkloadGroupStatsHolder) o; return completions == that.completions && rejections == that.rejections && Objects.equals(resourceStats, that.resourceStats) @@ -230,7 +230,7 @@ public ResourceStats(StreamInput in) throws IOException { /** * static factory method to convert {@link ResourceTypeState} into {@link ResourceStats} * @param resourceTypeState which needs to be converted - * @return QueryGroupStatsHolder object + * @return WorkloadGroupStatsHolder object */ public static ResourceStats from(ResourceTypeState resourceTypeState) { return new ResourceStats( @@ -243,7 +243,7 @@ public static ResourceStats from(ResourceTypeState resourceTypeState) { /** * Writes the @param {stats} to @param {out} * @param out StreamOutput - * @param stats QueryGroupStatsHolder + * @param stats WorkloadGroupStatsHolder * @throws IOException exception */ public static void writeTo(StreamOutput out, ResourceStats stats) throws IOException { diff --git a/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java b/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java index 05c84cd767b1f..32fc0487e4e66 100644 --- a/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java +++ b/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java @@ -9,7 +9,7 @@ package org.opensearch.wlm.tracker; import org.opensearch.core.tasks.resourcetracker.ResourceStats; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.List; @@ -24,7 +24,7 @@ public class CpuUsageCalculator extends ResourceUsageCalculator { private CpuUsageCalculator() {} @Override - public double calculateResourceUsage(List tasks) { + public double calculateResourceUsage(List tasks) { double usage = tasks.stream().mapToDouble(this::calculateTaskResourceUsage).sum(); usage /= PROCESSOR_COUNT; @@ -32,7 +32,7 @@ public double calculateResourceUsage(List tasks) { } @Override - public double calculateTaskResourceUsage(QueryGroupTask task) { + public double calculateTaskResourceUsage(WorkloadGroupTask task) { return (1.0f * task.getTotalResourceUtilization(ResourceStats.CPU)) / task.getElapsedTime(); } } diff --git a/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java b/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java index fb66ff47f58d0..6edb011f399e3 100644 --- a/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java +++ b/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java @@ -10,7 +10,7 @@ import org.opensearch.core.tasks.resourcetracker.ResourceStats; import 
org.opensearch.monitor.jvm.JvmStats; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.List; @@ -24,12 +24,12 @@ public class MemoryUsageCalculator extends ResourceUsageCalculator { private MemoryUsageCalculator() {} @Override - public double calculateResourceUsage(List tasks) { + public double calculateResourceUsage(List tasks) { return tasks.stream().mapToDouble(this::calculateTaskResourceUsage).sum(); } @Override - public double calculateTaskResourceUsage(QueryGroupTask task) { + public double calculateTaskResourceUsage(WorkloadGroupTask task) { return (1.0f * task.getTotalResourceUtilization(ResourceStats.MEMORY)) / HEAP_SIZE_BYTES; } } diff --git a/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java b/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java deleted file mode 100644 index 71cf3135781dd..0000000000000 --- a/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.wlm.tracker; - -import org.opensearch.tasks.TaskResourceTrackingService; -import org.opensearch.wlm.QueryGroupLevelResourceUsageView; -import org.opensearch.wlm.QueryGroupTask; -import org.opensearch.wlm.ResourceType; - -import java.util.EnumMap; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * This class tracks resource usage per QueryGroup - */ -public class QueryGroupResourceUsageTrackerService { - public static final EnumSet TRACKED_RESOURCES = EnumSet.allOf(ResourceType.class); - private final TaskResourceTrackingService taskResourceTrackingService; - - /** - * QueryGroupResourceTrackerService constructor - * - * @param taskResourceTrackingService Service that helps track resource usage of tasks running on a node. - */ - public QueryGroupResourceUsageTrackerService(TaskResourceTrackingService taskResourceTrackingService) { - this.taskResourceTrackingService = taskResourceTrackingService; - } - - /** - * Constructs a map of QueryGroupLevelResourceUsageView instances for each QueryGroup. - * - * @return Map of QueryGroup views - */ - public Map constructQueryGroupLevelUsageViews() { - final Map> tasksByQueryGroup = getTasksGroupedByQueryGroup(); - final Map queryGroupViews = new HashMap<>(); - - // Iterate over each QueryGroup entry - for (Map.Entry> queryGroupEntry : tasksByQueryGroup.entrySet()) { - // refresh the resource stats - taskResourceTrackingService.refreshResourceStats(queryGroupEntry.getValue().toArray(new QueryGroupTask[0])); - // Compute the QueryGroup resource usage - final Map queryGroupUsage = new EnumMap<>(ResourceType.class); - for (ResourceType resourceType : TRACKED_RESOURCES) { - double usage = resourceType.getResourceUsageCalculator().calculateResourceUsage(queryGroupEntry.getValue()); - queryGroupUsage.put(resourceType, usage); - } - - // Add to the QueryGroup View - queryGroupViews.put( - queryGroupEntry.getKey(), - new QueryGroupLevelResourceUsageView(queryGroupUsage, queryGroupEntry.getValue()) - ); - } - return queryGroupViews; - } - - /** - * Groups tasks by their associated QueryGroup. 
- * - * @return Map of tasks grouped by QueryGroup - */ - private Map> getTasksGroupedByQueryGroup() { - return taskResourceTrackingService.getResourceAwareTasks() - .values() - .stream() - .filter(QueryGroupTask.class::isInstance) - .map(QueryGroupTask.class::cast) - .filter(QueryGroupTask::isQueryGroupSet) - .collect(Collectors.groupingBy(QueryGroupTask::getQueryGroupId, Collectors.mapping(task -> task, Collectors.toList()))); - } -} diff --git a/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java b/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java index bc8317cbfbf92..7ec1f531c8920 100644 --- a/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java +++ b/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java @@ -9,7 +9,7 @@ package org.opensearch.wlm.tracker; import org.opensearch.common.annotation.PublicApi; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.List; @@ -23,12 +23,12 @@ public abstract class ResourceUsageCalculator { * * @param tasks list of tasks in the query group */ - public abstract double calculateResourceUsage(List tasks); + public abstract double calculateResourceUsage(List tasks); /** * calculates the task level resource usage - * @param task QueryGroupTask + * @param task WorkloadGroupTask * @return task level resource usage */ - public abstract double calculateTaskResourceUsage(QueryGroupTask task); + public abstract double calculateTaskResourceUsage(WorkloadGroupTask task); } diff --git a/server/src/main/java/org/opensearch/wlm/tracker/WorkloadGroupResourceUsageTrackerService.java b/server/src/main/java/org/opensearch/wlm/tracker/WorkloadGroupResourceUsageTrackerService.java new file mode 100644 index 0000000000000..a0d39e28f77f6 --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/tracker/WorkloadGroupResourceUsageTrackerService.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm.tracker; + +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadGroupLevelResourceUsageView; +import org.opensearch.wlm.WorkloadGroupTask; + +import java.util.EnumMap; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * This class tracks resource usage per WorkloadGroup + */ +public class WorkloadGroupResourceUsageTrackerService { + public static final EnumSet TRACKED_RESOURCES = EnumSet.allOf(ResourceType.class); + private final TaskResourceTrackingService taskResourceTrackingService; + + /** + * WorkloadGroupResourceTrackerService constructor + * + * @param taskResourceTrackingService Service that helps track resource usage of tasks running on a node. + */ + public WorkloadGroupResourceUsageTrackerService(TaskResourceTrackingService taskResourceTrackingService) { + this.taskResourceTrackingService = taskResourceTrackingService; + } + + /** + * Constructs a map of WorkloadGroupLevelResourceUsageView instances for each WorkloadGroup. 
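To make the usage numbers behind these views concrete, here is a small self-contained sketch of the arithmetic used by CpuUsageCalculator, MemoryUsageCalculator, and the getNormalisedThreshold/getExcessUsage checks in the cancellation service earlier in this patch. All figures are invented for illustration, and the node-level cancellation threshold value is an assumption rather than a setting taken from WorkloadManagementSettings.

public final class WlmUsageMathExample {
    public static void main(String[] args) {
        // Illustrative per-task figures (not taken from any real workload).
        double taskCpuNanos = 4_000_000_000.0;       // CPU time consumed by one task
        double taskElapsedNanos = 10_000_000_000.0;  // wall-clock time since the task started
        int processorCount = 8;

        // CpuUsageCalculator: per-task usage is cpuTime / elapsedTime; the group total is the
        // sum over its tasks divided by the processor count.
        double groupCpuUsage = (taskCpuNanos / taskElapsedNanos) / processorCount;

        // MemoryUsageCalculator: per-task usage is consumed bytes / max heap bytes.
        double groupMemoryUsage = (512.0 * 1024 * 1024) / (8.0 * 1024 * 1024 * 1024);

        // Cancellation check from the service above: the group's configured limit is scaled by
        // the node-level cancellation threshold, and tasks are only cancelled while the current
        // usage exceeds that scaled value (MIN_VALUE = 1e-9 guards against float noise).
        double groupCpuLimit = 0.4;                  // resource_limits entry on the WorkloadGroup
        double nodeCancellationThreshold = 0.9;      // assumed node-level threshold
        double excess = groupCpuUsage - groupCpuLimit * nodeCancellationThreshold;

        System.out.printf("cpu=%.3f memory=%.3f excess=%.3f cancel=%b%n",
            groupCpuUsage, groupMemoryUsage, excess, excess > 1e-9);
    }
}

With these sample numbers the group sits well under its scaled limit, so no tasks would be selected; raising groupCpuUsage above 0.36 in this sketch is what would push excess past zero and trigger selection.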
+ * + * @return Map of WorkloadGroup views + */ + public Map constructWorkloadGroupLevelUsageViews() { + final Map> tasksByWorkloadGroup = getTasksGroupedByWorkloadGroup(); + final Map workloadGroupViews = new HashMap<>(); + + // Iterate over each WorkloadGroup entry + for (Map.Entry> workloadGroupEntry : tasksByWorkloadGroup.entrySet()) { + // refresh the resource stats + taskResourceTrackingService.refreshResourceStats(workloadGroupEntry.getValue().toArray(new WorkloadGroupTask[0])); + // Compute the WorkloadGroup resource usage + final Map workloadGroupUsage = new EnumMap<>(ResourceType.class); + for (ResourceType resourceType : TRACKED_RESOURCES) { + double usage = resourceType.getResourceUsageCalculator().calculateResourceUsage(workloadGroupEntry.getValue()); + workloadGroupUsage.put(resourceType, usage); + } + + // Add to the WorkloadGroup View + workloadGroupViews.put( + workloadGroupEntry.getKey(), + new WorkloadGroupLevelResourceUsageView(workloadGroupUsage, workloadGroupEntry.getValue()) + ); + } + return workloadGroupViews; + } + + /** + * Groups tasks by their associated WorkloadGroup. + * + * @return Map of tasks grouped by WorkloadGroup + */ + private Map> getTasksGroupedByWorkloadGroup() { + return taskResourceTrackingService.getResourceAwareTasks() + .values() + .stream() + .filter(WorkloadGroupTask.class::isInstance) + .map(WorkloadGroupTask.class::cast) + .filter(WorkloadGroupTask::isWorkloadGroupSet) + .collect(Collectors.groupingBy(WorkloadGroupTask::getWorkloadGroupId, Collectors.mapping(task -> task, Collectors.toList()))); + } +} diff --git a/server/src/main/java/org/opensearch/wlm/tracker/package-info.java b/server/src/main/java/org/opensearch/wlm/tracker/package-info.java index 86efc99355d3d..afea8701d1de6 100644 --- a/server/src/main/java/org/opensearch/wlm/tracker/package-info.java +++ b/server/src/main/java/org/opensearch/wlm/tracker/package-info.java @@ -7,6 +7,6 @@ */ /** - * QueryGroup resource tracking artifacts + * WorkloadGroup resource tracking artifacts */ package org.opensearch.wlm.tracker; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponseTests.java index 01dc033568a95..d1e13546935b2 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponseTests.java @@ -17,8 +17,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.wlm.ResourceType; -import org.opensearch.wlm.stats.QueryGroupStats; import org.opensearch.wlm.stats.WlmStats; +import org.opensearch.wlm.stats.WorkloadGroupStats; import java.io.IOException; import java.util.ArrayList; @@ -29,7 +29,7 @@ public class WlmStatsResponseTests extends OpenSearchTestCase { ClusterName clusterName = new ClusterName("test-cluster"); - String testQueryGroupId = "safjgagnaeekg-3r3fads"; + String testWorkloadGroupId = "safjgagnaeekg-3r3fads"; DiscoveryNode node = new DiscoveryNode( "node-1", buildNewFakeTransportAddress(), @@ -37,45 +37,45 @@ public class WlmStatsResponseTests extends OpenSearchTestCase { Set.of(DiscoveryNodeRole.DATA_ROLE), Version.CURRENT ); - Map statsHolderMap = new HashMap<>(); - QueryGroupStats queryGroupStats = new QueryGroupStats( + Map statsHolderMap = new HashMap<>(); + WorkloadGroupStats workloadGroupStats = new WorkloadGroupStats( Map.of( - testQueryGroupId, - 
new QueryGroupStats.QueryGroupStatsHolder( + testWorkloadGroupId, + new WorkloadGroupStats.WorkloadGroupStatsHolder( 0, 0, 1, 0, Map.of( ResourceType.CPU, - new QueryGroupStats.ResourceStats(0, 0, 0), + new WorkloadGroupStats.ResourceStats(0, 0, 0), ResourceType.MEMORY, - new QueryGroupStats.ResourceStats(0, 0, 0) + new WorkloadGroupStats.ResourceStats(0, 0, 0) ) ) ) ); - WlmStats wlmStats = new WlmStats(node, queryGroupStats); + WlmStats wlmStats = new WlmStats(node, workloadGroupStats); List wlmStatsList = List.of(wlmStats); List failedNodeExceptionList = new ArrayList<>(); public void testSerializationAndDeserialization() throws IOException { - WlmStatsResponse queryGroupStatsResponse = new WlmStatsResponse(clusterName, wlmStatsList, failedNodeExceptionList); + WlmStatsResponse workloadGroupStatsResponse = new WlmStatsResponse(clusterName, wlmStatsList, failedNodeExceptionList); BytesStreamOutput out = new BytesStreamOutput(); - queryGroupStatsResponse.writeTo(out); + workloadGroupStatsResponse.writeTo(out); StreamInput in = out.bytes().streamInput(); WlmStatsResponse deserializedResponse = new WlmStatsResponse(in); - assertEquals(queryGroupStatsResponse.getClusterName(), deserializedResponse.getClusterName()); - assertEquals(queryGroupStatsResponse.getNodes().size(), deserializedResponse.getNodes().size()); + assertEquals(workloadGroupStatsResponse.getClusterName(), deserializedResponse.getClusterName()); + assertEquals(workloadGroupStatsResponse.getNodes().size(), deserializedResponse.getNodes().size()); } public void testToString() { - WlmStatsResponse queryGroupStatsResponse = new WlmStatsResponse(clusterName, wlmStatsList, failedNodeExceptionList); - String responseString = queryGroupStatsResponse.toString(); + WlmStatsResponse workloadGroupStatsResponse = new WlmStatsResponse(clusterName, wlmStatsList, failedNodeExceptionList); + String responseString = workloadGroupStatsResponse.toString(); assertEquals( "{\n" + " \"node-1\" : {\n" - + " \"query_groups\" : {\n" + + " \"workload_groups\" : {\n" + " \"safjgagnaeekg-3r3fads\" : {\n" + " \"total_completions\" : 0,\n" + " \"total_rejections\" : 0,\n" diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportWlmStatsActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportWlmStatsActionTests.java index 49d2cc4d23e62..1cff2cd44de52 100644 --- a/server/src/test/java/org/opensearch/action/support/nodes/TransportWlmStatsActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/nodes/TransportWlmStatsActionTests.java @@ -15,7 +15,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.transport.CapturingTransport; -import org.opensearch.wlm.QueryGroupService; +import org.opensearch.wlm.WorkloadGroupService; import java.io.IOException; import java.util.ArrayList; @@ -48,7 +48,7 @@ private Map> performWlmStatsAction(WlmStatsRequest THREAD_POOL, clusterService, transportService, - mock(QueryGroupService.class), + mock(WorkloadGroupService.class), new ActionFilters(Collections.emptySet()) ); PlainActionFuture listener = new PlainActionFuture<>(); diff --git a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java index 8d2f2b743af9f..d590df6575680 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java @@ -33,8 
+33,8 @@ package org.opensearch.cluster; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.metadata.QueryGroupMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.WorkloadGroupMetadata; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.ExistingShardsAllocator; import org.opensearch.cluster.routing.allocation.RoutingAllocation; @@ -330,18 +330,19 @@ public void testRejectsDuplicateExistingShardsAllocatorName() { ); } - public void testQueryGroupMetadataRegister() { + public void testWorkloadGroupMetadataRegister() { List customEntries = ClusterModule.getNamedWriteables(); List customXEntries = ClusterModule.getNamedXWriteables(); assertTrue( customEntries.stream() - .anyMatch(entry -> entry.categoryClass == Metadata.Custom.class && entry.name.equals(QueryGroupMetadata.TYPE)) + .anyMatch(entry -> entry.categoryClass == Metadata.Custom.class && entry.name.equals(WorkloadGroupMetadata.TYPE)) ); assertTrue( customXEntries.stream() .anyMatch( - entry -> entry.categoryClass == Metadata.Custom.class && entry.name.getPreferredName().equals(QueryGroupMetadata.TYPE) + entry -> entry.categoryClass == Metadata.Custom.class + && entry.name.getPreferredName().equals(WorkloadGroupMetadata.TYPE) ) ); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupTests.java b/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupTests.java deleted file mode 100644 index ce1b1270fc94e..0000000000000 --- a/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupTests.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.cluster.metadata; - -import org.opensearch.common.UUIDs; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.test.AbstractSerializingTestCase; -import org.opensearch.wlm.MutableQueryGroupFragment; -import org.opensearch.wlm.MutableQueryGroupFragment.ResiliencyMode; -import org.opensearch.wlm.ResourceType; -import org.joda.time.Instant; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class QueryGroupTests extends AbstractSerializingTestCase { - - private static final List allowedModes = List.of(ResiliencyMode.SOFT, ResiliencyMode.ENFORCED, ResiliencyMode.MONITOR); - - static QueryGroup createRandomQueryGroup(String _id) { - String name = randomAlphaOfLength(10); - Map resourceLimit = new HashMap<>(); - resourceLimit.put(ResourceType.MEMORY, randomDoubleBetween(0.0, 0.80, false)); - return new QueryGroup(name, _id, new MutableQueryGroupFragment(randomMode(), resourceLimit), Instant.now().getMillis()); - } - - private static ResiliencyMode randomMode() { - return allowedModes.get(randomIntBetween(0, allowedModes.size() - 1)); - } - - /** - * Parses to a new instance using the provided {@link XContentParser} - * - * @param parser - */ - @Override - protected QueryGroup doParseInstance(XContentParser parser) throws IOException { - return QueryGroup.fromXContent(parser); - } - - /** - * Returns a {@link Writeable.Reader} that can be used to de-serialize the instance - */ - @Override - protected Writeable.Reader instanceReader() { - return QueryGroup::new; - } - - /** - * Creates a random test instance to use in the tests. This method will be - * called multiple times during test execution and should return a different - * random instance each time it is called. 
- */ - @Override - protected QueryGroup createTestInstance() { - return createRandomQueryGroup("1232sfraeradf_"); - } - - public void testNullName() { - assertThrows( - NullPointerException.class, - () -> new QueryGroup( - null, - "_id", - new MutableQueryGroupFragment(randomMode(), Collections.emptyMap()), - Instant.now().getMillis() - ) - ); - } - - public void testNullId() { - assertThrows( - NullPointerException.class, - () -> new QueryGroup( - "Dummy", - null, - new MutableQueryGroupFragment(randomMode(), Collections.emptyMap()), - Instant.now().getMillis() - ) - ); - } - - public void testNullResourceLimits() { - assertThrows( - NullPointerException.class, - () -> new QueryGroup("analytics", "_id", new MutableQueryGroupFragment(randomMode(), null), Instant.now().getMillis()) - ); - } - - public void testEmptyResourceLimits() { - assertThrows( - IllegalArgumentException.class, - () -> new QueryGroup( - "analytics", - "_id", - new MutableQueryGroupFragment(randomMode(), Collections.emptyMap()), - Instant.now().getMillis() - ) - ); - } - - public void testIllegalQueryGroupMode() { - assertThrows( - NullPointerException.class, - () -> new QueryGroup( - "analytics", - "_id", - new MutableQueryGroupFragment(null, Map.of(ResourceType.MEMORY, 0.4)), - Instant.now().getMillis() - ) - ); - } - - public void testQueryGroupInitiation() { - QueryGroup queryGroup = new QueryGroup("analytics", new MutableQueryGroupFragment(randomMode(), Map.of(ResourceType.MEMORY, 0.4))); - assertNotNull(queryGroup.getName()); - assertNotNull(queryGroup.get_id()); - assertNotNull(queryGroup.getResourceLimits()); - assertFalse(queryGroup.getResourceLimits().isEmpty()); - assertEquals(1, queryGroup.getResourceLimits().size()); - assertTrue(allowedModes.contains(queryGroup.getResiliencyMode())); - assertTrue(queryGroup.getUpdatedAtInMillis() != 0); - } - - public void testIllegalQueryGroupName() { - assertThrows( - NullPointerException.class, - () -> new QueryGroup("a".repeat(51), "_id", new MutableQueryGroupFragment(), Instant.now().getMillis()) - ); - assertThrows( - NullPointerException.class, - () -> new QueryGroup("", "_id", new MutableQueryGroupFragment(), Instant.now().getMillis()) - ); - - } - - public void testInvalidResourceLimitWhenInvalidSystemResourceValueIsGiven() { - assertThrows( - IllegalArgumentException.class, - () -> new QueryGroup( - "analytics", - "_id", - new MutableQueryGroupFragment(randomMode(), Map.of(ResourceType.MEMORY, randomDoubleBetween(1.1, 1.8, false))), - Instant.now().getMillis() - ) - ); - } - - public void testValidQueryGroup() { - QueryGroup queryGroup = new QueryGroup( - "analytics", - "_id", - new MutableQueryGroupFragment(randomMode(), Map.of(ResourceType.MEMORY, randomDoubleBetween(0.01, 0.8, false))), - Instant.ofEpochMilli(1717187289).getMillis() - ); - - assertNotNull(queryGroup.getName()); - assertEquals("analytics", queryGroup.getName()); - assertNotNull(queryGroup.getResourceLimits()); - assertFalse(queryGroup.getResourceLimits().isEmpty()); - assertEquals(1, queryGroup.getResourceLimits().size()); - assertTrue(allowedModes.contains(queryGroup.getResiliencyMode())); - assertEquals(1717187289, queryGroup.getUpdatedAtInMillis()); - } - - public void testToXContent() throws IOException { - long currentTimeInMillis = Instant.now().getMillis(); - String queryGroupId = UUIDs.randomBase64UUID(); - QueryGroup queryGroup = new QueryGroup( - "TestQueryGroup", - queryGroupId, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.30, 
ResourceType.MEMORY, 0.40)), - currentTimeInMillis - ); - XContentBuilder builder = JsonXContent.contentBuilder(); - queryGroup.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals( - "{\"_id\":\"" - + queryGroupId - + "\",\"name\":\"TestQueryGroup\",\"resiliency_mode\":\"enforced\",\"resource_limits\":{\"cpu\":0.3,\"memory\":0.4},\"updated_at\":" - + currentTimeInMillis - + "}", - builder.toString() - ); - } -} diff --git a/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/WorkloadGroupMetadataTests.java similarity index 59% rename from server/src/test/java/org/opensearch/cluster/metadata/QueryGroupMetadataTests.java rename to server/src/test/java/org/opensearch/cluster/metadata/WorkloadGroupMetadataTests.java index 3f8d231ffb91e..141cd9cf2e2c7 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/QueryGroupMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/WorkloadGroupMetadataTests.java @@ -15,33 +15,36 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractDiffableSerializationTestCase; -import org.opensearch.wlm.MutableQueryGroupFragment; +import org.opensearch.wlm.MutableWorkloadGroupFragment; import org.opensearch.wlm.ResourceType; import java.io.IOException; import java.util.Collections; import java.util.Map; -import static org.opensearch.cluster.metadata.QueryGroupTests.createRandomQueryGroup; +import static org.opensearch.cluster.metadata.WorkloadGroupTests.createRandomWorkloadGroup; -public class QueryGroupMetadataTests extends AbstractDiffableSerializationTestCase { +public class WorkloadGroupMetadataTests extends AbstractDiffableSerializationTestCase { public void testToXContent() throws IOException { long updatedAt = 1720047207; - QueryGroupMetadata queryGroupMetadata = new QueryGroupMetadata( + WorkloadGroupMetadata workloadGroupMetadata = new WorkloadGroupMetadata( Map.of( "ajakgakg983r92_4242", - new QueryGroup( + new WorkloadGroup( "test", "ajakgakg983r92_4242", - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.5)), + new MutableWorkloadGroupFragment( + MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.MEMORY, 0.5) + ), updatedAt ) ) ); XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); - queryGroupMetadata.toXContent(builder, null); + workloadGroupMetadata.toXContent(builder, null); builder.endObject(); assertEquals( "{\"ajakgakg983r92_4242\":{\"_id\":\"ajakgakg983r92_4242\",\"name\":\"test\",\"resiliency_mode\":\"enforced\",\"resource_limits\":{\"memory\":0.5},\"updated_at\":1720047207}}", @@ -53,41 +56,40 @@ public void testToXContent() throws IOException { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( Collections.singletonList( - new NamedWriteableRegistry.Entry(QueryGroupMetadata.class, QueryGroupMetadata.TYPE, QueryGroupMetadata::new) + new NamedWriteableRegistry.Entry(WorkloadGroupMetadata.class, WorkloadGroupMetadata.TYPE, WorkloadGroupMetadata::new) ) ); } @Override protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) { - final QueryGroup queryGroup = createRandomQueryGroup("asdfakgjwrir23r25"); - final QueryGroupMetadata queryGroupMetadata = new QueryGroupMetadata(Map.of(queryGroup.get_id(), queryGroup)); - return queryGroupMetadata; + final WorkloadGroup 
workloadGroup = createRandomWorkloadGroup("asdfakgjwrir23r25"); + return new WorkloadGroupMetadata(Map.of(workloadGroup.get_id(), workloadGroup)); } @Override protected Writeable.Reader> diffReader() { - return QueryGroupMetadata::readDiffFrom; + return WorkloadGroupMetadata::readDiffFrom; } @Override protected Metadata.Custom doParseInstance(XContentParser parser) throws IOException { - return QueryGroupMetadata.fromXContent(parser); + return WorkloadGroupMetadata.fromXContent(parser); } @Override protected Writeable.Reader instanceReader() { - return QueryGroupMetadata::new; + return WorkloadGroupMetadata::new; } @Override - protected QueryGroupMetadata createTestInstance() { - return new QueryGroupMetadata(getRandomQueryGroups()); + protected WorkloadGroupMetadata createTestInstance() { + return new WorkloadGroupMetadata(getRandomWorkloadGroups()); } - private Map getRandomQueryGroups() { - QueryGroup qg1 = createRandomQueryGroup("1243gsgsdgs"); - QueryGroup qg2 = createRandomQueryGroup("lkajga8080"); + private Map getRandomWorkloadGroups() { + WorkloadGroup qg1 = createRandomWorkloadGroup("1243gsgsdgs"); + WorkloadGroup qg2 = createRandomWorkloadGroup("lkajga8080"); return Map.of(qg1.get_id(), qg1, qg2.get_id(), qg2); } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/WorkloadGroupTests.java b/server/src/test/java/org/opensearch/cluster/metadata/WorkloadGroupTests.java new file mode 100644 index 0000000000000..5a5fd0fc0da0c --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/WorkloadGroupTests.java @@ -0,0 +1,202 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.common.UUIDs; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.AbstractSerializingTestCase; +import org.opensearch.wlm.MutableWorkloadGroupFragment; +import org.opensearch.wlm.MutableWorkloadGroupFragment.ResiliencyMode; +import org.opensearch.wlm.ResourceType; +import org.joda.time.Instant; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class WorkloadGroupTests extends AbstractSerializingTestCase { + + private static final List allowedModes = List.of(ResiliencyMode.SOFT, ResiliencyMode.ENFORCED, ResiliencyMode.MONITOR); + + static WorkloadGroup createRandomWorkloadGroup(String _id) { + String name = randomAlphaOfLength(10); + Map resourceLimit = new HashMap<>(); + resourceLimit.put(ResourceType.MEMORY, randomDoubleBetween(0.0, 0.80, false)); + return new WorkloadGroup(name, _id, new MutableWorkloadGroupFragment(randomMode(), resourceLimit), Instant.now().getMillis()); + } + + private static ResiliencyMode randomMode() { + return allowedModes.get(randomIntBetween(0, allowedModes.size() - 1)); + } + + /** + * Parses to a new instance using the provided {@link XContentParser} + * + * @param parser + */ + @Override + protected WorkloadGroup doParseInstance(XContentParser parser) throws IOException { + return WorkloadGroup.fromXContent(parser); + } + + /** + * Returns a {@link Writeable.Reader} that can be used to de-serialize the instance + */ + @Override + protected Writeable.Reader instanceReader() { + return WorkloadGroup::new; + } + + /** + * Creates a random test instance to use in the tests. This method will be + * called multiple times during test execution and should return a different + * random instance each time it is called. 
+ */ + @Override + protected WorkloadGroup createTestInstance() { + return createRandomWorkloadGroup("1232sfraeradf_"); + } + + public void testNullName() { + assertThrows( + NullPointerException.class, + () -> new WorkloadGroup( + null, + "_id", + new MutableWorkloadGroupFragment(randomMode(), Collections.emptyMap()), + Instant.now().getMillis() + ) + ); + } + + public void testNullId() { + assertThrows( + NullPointerException.class, + () -> new WorkloadGroup( + "Dummy", + null, + new MutableWorkloadGroupFragment(randomMode(), Collections.emptyMap()), + Instant.now().getMillis() + ) + ); + } + + public void testNullResourceLimits() { + assertThrows( + NullPointerException.class, + () -> new WorkloadGroup("analytics", "_id", new MutableWorkloadGroupFragment(randomMode(), null), Instant.now().getMillis()) + ); + } + + public void testEmptyResourceLimits() { + assertThrows( + IllegalArgumentException.class, + () -> new WorkloadGroup( + "analytics", + "_id", + new MutableWorkloadGroupFragment(randomMode(), Collections.emptyMap()), + Instant.now().getMillis() + ) + ); + } + + public void testIllegalWorkloadGroupMode() { + assertThrows( + NullPointerException.class, + () -> new WorkloadGroup( + "analytics", + "_id", + new MutableWorkloadGroupFragment(null, Map.of(ResourceType.MEMORY, 0.4)), + Instant.now().getMillis() + ) + ); + } + + public void testWorkloadGroupInitiation() { + WorkloadGroup workloadGroup = new WorkloadGroup( + "analytics", + new MutableWorkloadGroupFragment(randomMode(), Map.of(ResourceType.MEMORY, 0.4)) + ); + assertNotNull(workloadGroup.getName()); + assertNotNull(workloadGroup.get_id()); + assertNotNull(workloadGroup.getResourceLimits()); + assertFalse(workloadGroup.getResourceLimits().isEmpty()); + assertEquals(1, workloadGroup.getResourceLimits().size()); + assertTrue(allowedModes.contains(workloadGroup.getResiliencyMode())); + assertTrue(workloadGroup.getUpdatedAtInMillis() != 0); + } + + public void testIllegalWorkloadGroupName() { + assertThrows( + NullPointerException.class, + () -> new WorkloadGroup("a".repeat(51), "_id", new MutableWorkloadGroupFragment(), Instant.now().getMillis()) + ); + assertThrows( + NullPointerException.class, + () -> new WorkloadGroup("", "_id", new MutableWorkloadGroupFragment(), Instant.now().getMillis()) + ); + + } + + public void testInvalidResourceLimitWhenInvalidSystemResourceValueIsGiven() { + assertThrows( + IllegalArgumentException.class, + () -> new WorkloadGroup( + "analytics", + "_id", + new MutableWorkloadGroupFragment(randomMode(), Map.of(ResourceType.MEMORY, randomDoubleBetween(1.1, 1.8, false))), + Instant.now().getMillis() + ) + ); + } + + public void testValidWorkloadGroup() { + WorkloadGroup workloadGroup = new WorkloadGroup( + "analytics", + "_id", + new MutableWorkloadGroupFragment(randomMode(), Map.of(ResourceType.MEMORY, randomDoubleBetween(0.01, 0.8, false))), + Instant.ofEpochMilli(1717187289).getMillis() + ); + + assertNotNull(workloadGroup.getName()); + assertEquals("analytics", workloadGroup.getName()); + assertNotNull(workloadGroup.getResourceLimits()); + assertFalse(workloadGroup.getResourceLimits().isEmpty()); + assertEquals(1, workloadGroup.getResourceLimits().size()); + assertTrue(allowedModes.contains(workloadGroup.getResiliencyMode())); + assertEquals(1717187289, workloadGroup.getUpdatedAtInMillis()); + } + + public void testToXContent() throws IOException { + long currentTimeInMillis = Instant.now().getMillis(); + String workloadGroupId = UUIDs.randomBase64UUID(); + WorkloadGroup workloadGroup = new 
WorkloadGroup( + "TestWorkloadGroup", + workloadGroupId, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.30, ResourceType.MEMORY, 0.40)), + currentTimeInMillis + ); + XContentBuilder builder = JsonXContent.contentBuilder(); + workloadGroup.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals( + "{\"_id\":\"" + + workloadGroupId + + "\",\"name\":\"TestWorkloadGroup\",\"resiliency_mode\":\"enforced\",\"resource_limits\":{\"cpu\":0.3,\"memory\":0.4},\"updated_at\":" + + currentTimeInMillis + + "}", + builder.toString() + ); + } +} diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java index 8c4685bee6a0b..8cb22201da1b6 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java @@ -39,9 +39,9 @@ import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.wlm.QueryGroupService; -import org.opensearch.wlm.QueryGroupTask; import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadGroupService; +import org.opensearch.wlm.WorkloadGroupTask; import org.junit.After; import org.junit.Before; @@ -77,12 +77,12 @@ public class SearchBackpressureServiceTests extends OpenSearchTestCase { MockTransportService transportService; TaskManager taskManager; ThreadPool threadPool; - QueryGroupService queryGroupService; + WorkloadGroupService workloadGroupService; @Before public void setup() { threadPool = new TestThreadPool(getClass().getName()); - queryGroupService = mock(QueryGroupService.class); + workloadGroupService = mock(WorkloadGroupService.class); transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); transportService.start(); transportService.acceptIncomingRequests(); @@ -125,10 +125,10 @@ public void testIsNodeInDuress() { new TaskResourceUsageTrackers(), new TaskResourceUsageTrackers(), taskManager, - queryGroupService + workloadGroupService ); - when(queryGroupService.shouldSBPHandle(any())).thenReturn(true); + when(workloadGroupService.shouldSBPHandle(any())).thenReturn(true); // Node not in duress. cpuUsage.set(0.0); @@ -171,10 +171,10 @@ public void testTrackerStateUpdateOnSearchTaskCompletion() { taskResourceUsageTrackers, new TaskResourceUsageTrackers(), taskManager, - queryGroupService + workloadGroupService ); - when(queryGroupService.shouldSBPHandle(any())).thenReturn(true); + when(workloadGroupService.shouldSBPHandle(any())).thenReturn(true); for (int i = 0; i < 100; i++) { // service.onTaskCompleted(new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>())); @@ -205,10 +205,10 @@ public void testTrackerStateUpdateOnSearchShardTaskCompletion() { new TaskResourceUsageTrackers(), taskResourceUsageTrackers, taskManager, - queryGroupService + workloadGroupService ); - when(queryGroupService.shouldSBPHandle(any())).thenReturn(true); + when(workloadGroupService.shouldSBPHandle(any())).thenReturn(true); // Record task completions to update the tracker state. Tasks other than SearchTask & SearchShardTask are ignored. 
service.onTaskCompleted(createMockTaskWithResourceStats(CancellableTask.class, 100, 200, 101)); @@ -260,10 +260,10 @@ public void testSearchTaskInFlightCancellation() { taskResourceUsageTrackers, new TaskResourceUsageTrackers(), mockTaskManager, - queryGroupService + workloadGroupService ); - when(queryGroupService.shouldSBPHandle(any())).thenReturn(true); + when(workloadGroupService.shouldSBPHandle(any())).thenReturn(true); // Run two iterations so that node is marked 'in duress' from the third iteration onwards. service.doRun(); @@ -277,7 +277,7 @@ public void testSearchTaskInFlightCancellation() { when(settings.getSearchTaskSettings()).thenReturn(searchTaskSettings); // Create a mix of low and high resource usage SearchTasks (50 low + 25 high resource usage tasks). - Map activeSearchTasks = new HashMap<>(); + Map activeSearchTasks = new HashMap<>(); for (long i = 0; i < 75; i++) { if (i % 3 == 0) { activeSearchTasks.put(i, createMockTaskWithResourceStats(SearchTask.class, 500, taskHeapUsageBytes, i)); @@ -285,7 +285,7 @@ public void testSearchTaskInFlightCancellation() { activeSearchTasks.put(i, createMockTaskWithResourceStats(SearchTask.class, 100, taskHeapUsageBytes, i)); } } - activeSearchTasks.values().forEach(task -> task.setQueryGroupId(threadPool.getThreadContext())); + activeSearchTasks.values().forEach(task -> task.setWorkloadGroupId(threadPool.getThreadContext())); doReturn(activeSearchTasks).when(mockTaskResourceTrackingService).getResourceAwareTasks(); // There are 25 SearchTasks eligible for cancellation but only 5 will be cancelled (burst limit). @@ -356,10 +356,10 @@ public void testSearchShardTaskInFlightCancellation() { new TaskResourceUsageTrackers(), taskResourceUsageTrackers, mockTaskManager, - queryGroupService + workloadGroupService ); - when(queryGroupService.shouldSBPHandle(any())).thenReturn(true); + when(workloadGroupService.shouldSBPHandle(any())).thenReturn(true); // Run two iterations so that node is marked 'in duress' from the third iteration onwards. service.doRun(); @@ -373,7 +373,7 @@ public void testSearchShardTaskInFlightCancellation() { when(settings.getSearchShardTaskSettings()).thenReturn(searchShardTaskSettings); // Create a mix of low and high resource usage tasks (60 low + 15 high resource usage tasks). - Map activeSearchShardTasks = new HashMap<>(); + Map activeSearchShardTasks = new HashMap<>(); for (long i = 0; i < 75; i++) { if (i % 5 == 0) { activeSearchShardTasks.put(i, createMockTaskWithResourceStats(SearchShardTask.class, 500, taskHeapUsageBytes, i)); @@ -381,7 +381,7 @@ public void testSearchShardTaskInFlightCancellation() { activeSearchShardTasks.put(i, createMockTaskWithResourceStats(SearchShardTask.class, 100, taskHeapUsageBytes, i)); } } - activeSearchShardTasks.values().forEach(task -> task.setQueryGroupId(threadPool.getThreadContext())); + activeSearchShardTasks.values().forEach(task -> task.setWorkloadGroupId(threadPool.getThreadContext())); doReturn(activeSearchShardTasks).when(mockTaskResourceTrackingService).getResourceAwareTasks(); // There are 15 SearchShardTasks eligible for cancellation but only 10 will be cancelled (burst limit). 
@@ -461,10 +461,10 @@ public void testNonCancellationOfHeapBasedTasksWhenHeapNotInDuress() { taskResourceUsageTrackers, new TaskResourceUsageTrackers(), mockTaskManager, - queryGroupService + workloadGroupService ); - when(queryGroupService.shouldSBPHandle(any())).thenReturn(true); + when(workloadGroupService.shouldSBPHandle(any())).thenReturn(true); service.doRun(); service.doRun(); @@ -475,7 +475,7 @@ public void testNonCancellationOfHeapBasedTasksWhenHeapNotInDuress() { when(settings.getSearchTaskSettings()).thenReturn(searchTaskSettings); // Create a mix of low and high resource usage tasks (60 low + 15 high resource usage tasks). - Map activeSearchTasks = new HashMap<>(); + Map activeSearchTasks = new HashMap<>(); for (long i = 0; i < 75; i++) { if (i % 5 == 0) { activeSearchTasks.put(i, createMockTaskWithResourceStats(SearchTask.class, 500, 800, i)); @@ -483,7 +483,7 @@ public void testNonCancellationOfHeapBasedTasksWhenHeapNotInDuress() { activeSearchTasks.put(i, createMockTaskWithResourceStats(SearchTask.class, 100, 800, i)); } } - activeSearchTasks.values().forEach(task -> task.setQueryGroupId(threadPool.getThreadContext())); + activeSearchTasks.values().forEach(task -> task.setWorkloadGroupId(threadPool.getThreadContext())); doReturn(activeSearchTasks).when(mockTaskResourceTrackingService).getResourceAwareTasks(); // this will trigger cancellation but these cancellation should only be cpu based @@ -560,11 +560,11 @@ public void testNonCancellationWhenSearchTrafficIsNotQualifyingForCancellation() taskResourceUsageTrackers, new TaskResourceUsageTrackers(), mockTaskManager, - queryGroupService + workloadGroupService ) ); - when(queryGroupService.shouldSBPHandle(any())).thenReturn(true); + when(workloadGroupService.shouldSBPHandle(any())).thenReturn(true); when(service.isHeapUsageDominatedBySearch(anyList(), anyDouble())).thenReturn(false); service.doRun(); @@ -576,16 +576,16 @@ public void testNonCancellationWhenSearchTrafficIsNotQualifyingForCancellation() when(settings.getSearchTaskSettings()).thenReturn(searchTaskSettings); // Create a mix of low and high resource usage tasks (60 low + 15 high resource usage tasks). - Map activeSearchTasks = new HashMap<>(); + Map activeSearchTasks = new HashMap<>(); for (long i = 0; i < 75; i++) { - Class taskType = randomBoolean() ? SearchTask.class : SearchShardTask.class; + Class taskType = randomBoolean() ? 
SearchTask.class : SearchShardTask.class; if (i % 5 == 0) { activeSearchTasks.put(i, createMockTaskWithResourceStats(taskType, 500, 800, i)); } else { activeSearchTasks.put(i, createMockTaskWithResourceStats(taskType, 100, 800, i)); } } - activeSearchTasks.values().forEach(task -> task.setQueryGroupId(threadPool.getThreadContext())); + activeSearchTasks.values().forEach(task -> task.setWorkloadGroupId(threadPool.getThreadContext())); doReturn(activeSearchTasks).when(mockTaskResourceTrackingService).getResourceAwareTasks(); // this will trigger cancellation but the cancellation should not happen as the node is not is duress because of search traffic diff --git a/server/src/test/java/org/opensearch/wlm/MutableQueryGroupFragmentTests.java b/server/src/test/java/org/opensearch/wlm/MutableWorkloadGroupFragmentTests.java similarity index 56% rename from server/src/test/java/org/opensearch/wlm/MutableQueryGroupFragmentTests.java rename to server/src/test/java/org/opensearch/wlm/MutableWorkloadGroupFragmentTests.java index cfe53ddbd2c14..321eee51e89d8 100644 --- a/server/src/test/java/org/opensearch/wlm/MutableQueryGroupFragmentTests.java +++ b/server/src/test/java/org/opensearch/wlm/MutableWorkloadGroupFragmentTests.java @@ -16,38 +16,38 @@ import java.util.HashMap; import java.util.Map; -public class MutableQueryGroupFragmentTests extends OpenSearchTestCase { +public class MutableWorkloadGroupFragmentTests extends OpenSearchTestCase { public void testSerializationDeserialization() throws IOException { Map resourceLimits = new HashMap<>(); resourceLimits.put(ResourceType.CPU, 0.5); resourceLimits.put(ResourceType.MEMORY, 0.75); - MutableQueryGroupFragment mutableQueryGroupFragment = new MutableQueryGroupFragment( - MutableQueryGroupFragment.ResiliencyMode.SOFT, + MutableWorkloadGroupFragment mutableWorkloadGroupFragment = new MutableWorkloadGroupFragment( + MutableWorkloadGroupFragment.ResiliencyMode.SOFT, resourceLimits ); BytesStreamOutput out = new BytesStreamOutput(); - mutableQueryGroupFragment.writeTo(out); + mutableWorkloadGroupFragment.writeTo(out); StreamInput in = out.bytes().streamInput(); - MutableQueryGroupFragment deserializedGroup = new MutableQueryGroupFragment(in); - assertEquals(mutableQueryGroupFragment, deserializedGroup); + MutableWorkloadGroupFragment deserializedGroup = new MutableWorkloadGroupFragment(in); + assertEquals(mutableWorkloadGroupFragment, deserializedGroup); } public void testSerializationDeserializationWithNull() throws IOException { - MutableQueryGroupFragment mutableQueryGroupFragment = new MutableQueryGroupFragment(); + MutableWorkloadGroupFragment mutableWorkloadGroupFragment = new MutableWorkloadGroupFragment(); BytesStreamOutput out = new BytesStreamOutput(); - mutableQueryGroupFragment.writeTo(out); + mutableWorkloadGroupFragment.writeTo(out); StreamInput in = out.bytes().streamInput(); - MutableQueryGroupFragment deserializedGroup = new MutableQueryGroupFragment(in); + MutableWorkloadGroupFragment deserializedGroup = new MutableWorkloadGroupFragment(in); assertEquals(0, deserializedGroup.getResourceLimits().size()); - assertEquals(mutableQueryGroupFragment.getResiliencyMode(), deserializedGroup.getResiliencyMode()); + assertEquals(mutableWorkloadGroupFragment.getResiliencyMode(), deserializedGroup.getResiliencyMode()); } public void testValidateResourceLimits() { Map invalidLimits = new HashMap<>(); invalidLimits.put(ResourceType.CPU, 1.5); Exception exception = assertThrows(IllegalArgumentException.class, () -> { - 
MutableQueryGroupFragment.validateResourceLimits(invalidLimits); + MutableWorkloadGroupFragment.validateResourceLimits(invalidLimits); }); String expectedMessage = "resource value should be greater than 0 and less or equal to 1.0"; String actualMessage = exception.getMessage(); @@ -55,12 +55,12 @@ public void testValidateResourceLimits() { } public void testSetMethodsWithNullAndEmptyValues() { - MutableQueryGroupFragment queryGroup = new MutableQueryGroupFragment(); - queryGroup.setResiliencyMode(null); - assertNull(queryGroup.getResiliencyMode()); - queryGroup.setResourceLimits(null); - assertNull(queryGroup.getResourceLimits()); - queryGroup.setResourceLimits(new HashMap<>()); - assertEquals(0, queryGroup.getResourceLimits().size()); + MutableWorkloadGroupFragment workloadGroup = new MutableWorkloadGroupFragment(); + workloadGroup.setResiliencyMode(null); + assertNull(workloadGroup.getResiliencyMode()); + workloadGroup.setResourceLimits(null); + assertNull(workloadGroup.getResourceLimits()); + workloadGroup.setResourceLimits(new HashMap<>()); + assertEquals(0, workloadGroup.getResourceLimits().size()); } } diff --git a/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java b/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java deleted file mode 100644 index 579d65846f69b..0000000000000 --- a/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java +++ /dev/null @@ -1,553 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.wlm; - -import org.opensearch.action.search.SearchTask; -import org.opensearch.cluster.ClusterChangedEvent; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.metadata.QueryGroup; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.search.backpressure.trackers.NodeDuressTrackers; -import org.opensearch.tasks.Task; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.Scheduler; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService; -import org.opensearch.wlm.cancellation.TaskSelectionStrategy; -import org.opensearch.wlm.stats.QueryGroupState; -import org.opensearch.wlm.tracker.QueryGroupResourceUsageTrackerService; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.function.BooleanSupplier; - -import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; - -import static org.opensearch.wlm.tracker.ResourceUsageCalculatorTests.createMockTaskWithResourceStats; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class QueryGroupServiceTests extends OpenSearchTestCase { - public static final String QUERY_GROUP_ID = "queryGroupId1"; - private QueryGroupService queryGroupService; - private 
QueryGroupTaskCancellationService mockCancellationService; - private ClusterService mockClusterService; - private ThreadPool mockThreadPool; - private WorkloadManagementSettings mockWorkloadManagementSettings; - private Scheduler.Cancellable mockScheduledFuture; - private Map mockQueryGroupStateMap; - NodeDuressTrackers mockNodeDuressTrackers; - QueryGroupsStateAccessor mockQueryGroupsStateAccessor; - - public void setUp() throws Exception { - super.setUp(); - mockClusterService = Mockito.mock(ClusterService.class); - mockThreadPool = Mockito.mock(ThreadPool.class); - mockScheduledFuture = Mockito.mock(Scheduler.Cancellable.class); - mockWorkloadManagementSettings = Mockito.mock(WorkloadManagementSettings.class); - mockQueryGroupStateMap = new HashMap<>(); - mockNodeDuressTrackers = Mockito.mock(NodeDuressTrackers.class); - mockCancellationService = Mockito.mock(TestQueryGroupCancellationService.class); - mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(); - when(mockNodeDuressTrackers.isNodeInDuress()).thenReturn(false); - - queryGroupService = new QueryGroupService( - mockCancellationService, - mockClusterService, - mockThreadPool, - mockWorkloadManagementSettings, - mockNodeDuressTrackers, - mockQueryGroupsStateAccessor, - new HashSet<>(), - new HashSet<>() - ); - } - - public void tearDown() throws Exception { - super.tearDown(); - mockThreadPool.shutdown(); - } - - public void testClusterChanged() { - ClusterChangedEvent mockClusterChangedEvent = Mockito.mock(ClusterChangedEvent.class); - ClusterState mockPreviousClusterState = Mockito.mock(ClusterState.class); - ClusterState mockClusterState = Mockito.mock(ClusterState.class); - Metadata mockPreviousMetadata = Mockito.mock(Metadata.class); - Metadata mockMetadata = Mockito.mock(Metadata.class); - QueryGroup addedQueryGroup = new QueryGroup( - "addedQueryGroup", - "4242", - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.5)), - 1L - ); - QueryGroup deletedQueryGroup = new QueryGroup( - "deletedQueryGroup", - "4241", - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.5)), - 1L - ); - Map previousQueryGroups = new HashMap<>(); - previousQueryGroups.put("4242", addedQueryGroup); - Map currentQueryGroups = new HashMap<>(); - currentQueryGroups.put("4241", deletedQueryGroup); - - when(mockClusterChangedEvent.previousState()).thenReturn(mockPreviousClusterState); - when(mockClusterChangedEvent.state()).thenReturn(mockClusterState); - when(mockPreviousClusterState.metadata()).thenReturn(mockPreviousMetadata); - when(mockClusterState.metadata()).thenReturn(mockMetadata); - when(mockPreviousMetadata.queryGroups()).thenReturn(previousQueryGroups); - when(mockMetadata.queryGroups()).thenReturn(currentQueryGroups); - queryGroupService.clusterChanged(mockClusterChangedEvent); - - Set currentQueryGroupsExpected = Set.of(currentQueryGroups.get("4241")); - Set previousQueryGroupsExpected = Set.of(previousQueryGroups.get("4242")); - - assertEquals(currentQueryGroupsExpected, queryGroupService.getActiveQueryGroups()); - assertEquals(previousQueryGroupsExpected, queryGroupService.getDeletedQueryGroups()); - } - - public void testDoStart_SchedulesTask() { - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - when(mockWorkloadManagementSettings.getQueryGroupServiceRunInterval()).thenReturn(TimeValue.timeValueSeconds(1)); - queryGroupService.doStart(); - 
Mockito.verify(mockThreadPool).scheduleWithFixedDelay(any(Runnable.class), any(TimeValue.class), eq(ThreadPool.Names.GENERIC)); - } - - public void testDoStop_CancelsScheduledTask() { - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - when(mockThreadPool.scheduleWithFixedDelay(any(), any(), any())).thenReturn(mockScheduledFuture); - queryGroupService.doStart(); - queryGroupService.doStop(); - Mockito.verify(mockScheduledFuture).cancel(); - } - - public void testDoRun_WhenModeEnabled() { - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - when(mockNodeDuressTrackers.isNodeInDuress()).thenReturn(true); - // Call the method - queryGroupService.doRun(); - - // Verify that refreshQueryGroups was called - - // Verify that cancelTasks was called with a BooleanSupplier - ArgumentCaptor booleanSupplierCaptor = ArgumentCaptor.forClass(BooleanSupplier.class); - Mockito.verify(mockCancellationService).cancelTasks(booleanSupplierCaptor.capture(), any(), any()); - - // Assert the behavior of the BooleanSupplier - BooleanSupplier capturedSupplier = booleanSupplierCaptor.getValue(); - assertTrue(capturedSupplier.getAsBoolean()); - - } - - public void testDoRun_WhenModeDisabled() { - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.DISABLED); - when(mockNodeDuressTrackers.isNodeInDuress()).thenReturn(false); - queryGroupService.doRun(); - // Verify that refreshQueryGroups was called - - Mockito.verify(mockCancellationService, never()).cancelTasks(any(), any(), any()); - - } - - public void testRejectIfNeeded_whenQueryGroupIdIsNullOrDefaultOne() { - QueryGroup testQueryGroup = new QueryGroup( - "testQueryGroup", - "queryGroupId1", - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.10)), - 1L - ); - Set activeQueryGroups = new HashSet<>() { - { - add(testQueryGroup); - } - }; - mockQueryGroupStateMap = new HashMap<>(); - mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(mockQueryGroupStateMap); - mockQueryGroupStateMap.put("queryGroupId1", new QueryGroupState()); - - Map spyMap = spy(mockQueryGroupStateMap); - - queryGroupService = new QueryGroupService( - mockCancellationService, - mockClusterService, - mockThreadPool, - mockWorkloadManagementSettings, - mockNodeDuressTrackers, - mockQueryGroupsStateAccessor, - activeQueryGroups, - new HashSet<>() - ); - queryGroupService.rejectIfNeeded(null); - - verify(spyMap, never()).get(any()); - - queryGroupService.rejectIfNeeded(QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get()); - verify(spyMap, never()).get(any()); - } - - public void testRejectIfNeeded_whenSoftModeQueryGroupIsContendedAndNodeInDuress() { - Set activeQueryGroups = getActiveQueryGroups( - "testQueryGroup", - QUERY_GROUP_ID, - MutableQueryGroupFragment.ResiliencyMode.SOFT, - Map.of(ResourceType.CPU, 0.10) - ); - mockQueryGroupStateMap = new HashMap<>(); - mockQueryGroupStateMap.put("queryGroupId1", new QueryGroupState()); - QueryGroupState state = new QueryGroupState(); - QueryGroupState.ResourceTypeState cpuResourceState = new QueryGroupState.ResourceTypeState(ResourceType.CPU); - cpuResourceState.setLastRecordedUsage(0.10); - state.getResourceState().put(ResourceType.CPU, cpuResourceState); - QueryGroupState spyState = spy(state); - mockQueryGroupStateMap.put(QUERY_GROUP_ID, spyState); - - mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(mockQueryGroupStateMap); - - queryGroupService = new QueryGroupService( - mockCancellationService, 
- mockClusterService, - mockThreadPool, - mockWorkloadManagementSettings, - mockNodeDuressTrackers, - mockQueryGroupsStateAccessor, - activeQueryGroups, - new HashSet<>() - ); - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - when(mockNodeDuressTrackers.isNodeInDuress()).thenReturn(true); - assertThrows(OpenSearchRejectedExecutionException.class, () -> queryGroupService.rejectIfNeeded("queryGroupId1")); - } - - public void testRejectIfNeeded_whenQueryGroupIsSoftMode() { - Set activeQueryGroups = getActiveQueryGroups( - "testQueryGroup", - QUERY_GROUP_ID, - MutableQueryGroupFragment.ResiliencyMode.SOFT, - Map.of(ResourceType.CPU, 0.10) - ); - mockQueryGroupStateMap = new HashMap<>(); - QueryGroupState spyState = spy(new QueryGroupState()); - mockQueryGroupStateMap.put("queryGroupId1", spyState); - - mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(mockQueryGroupStateMap); - - queryGroupService = new QueryGroupService( - mockCancellationService, - mockClusterService, - mockThreadPool, - mockWorkloadManagementSettings, - mockNodeDuressTrackers, - mockQueryGroupsStateAccessor, - activeQueryGroups, - new HashSet<>() - ); - queryGroupService.rejectIfNeeded("queryGroupId1"); - - verify(spyState, never()).getResourceState(); - } - - public void testRejectIfNeeded_whenQueryGroupIsEnforcedMode_andNotBreaching() { - QueryGroup testQueryGroup = getQueryGroup( - "testQueryGroup", - "queryGroupId1", - MutableQueryGroupFragment.ResiliencyMode.ENFORCED, - Map.of(ResourceType.CPU, 0.10) - ); - QueryGroup spuQueryGroup = spy(testQueryGroup); - Set activeQueryGroups = new HashSet<>() { - { - add(spuQueryGroup); - } - }; - mockQueryGroupStateMap = new HashMap<>(); - QueryGroupState queryGroupState = new QueryGroupState(); - queryGroupState.getResourceState().get(ResourceType.CPU).setLastRecordedUsage(0.05); - - mockQueryGroupStateMap.put("queryGroupId1", queryGroupState); - - mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(mockQueryGroupStateMap); - - queryGroupService = new QueryGroupService( - mockCancellationService, - mockClusterService, - mockThreadPool, - mockWorkloadManagementSettings, - mockNodeDuressTrackers, - mockQueryGroupsStateAccessor, - activeQueryGroups, - new HashSet<>() - ); - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - when(mockWorkloadManagementSettings.getNodeLevelCpuRejectionThreshold()).thenReturn(0.8); - queryGroupService.rejectIfNeeded("queryGroupId1"); - - // verify the check to compare the current usage and limit - // this should happen 3 times => 2 to check whether the resource limit has the TRACKED resource type and 1 to get the value - verify(spuQueryGroup, times(3)).getResourceLimits(); - assertEquals(0, queryGroupState.getResourceState().get(ResourceType.CPU).rejections.count()); - assertEquals(0, queryGroupState.totalRejections.count()); - } - - public void testRejectIfNeeded_whenQueryGroupIsEnforcedMode_andBreaching() { - QueryGroup testQueryGroup = new QueryGroup( - "testQueryGroup", - "queryGroupId1", - new MutableQueryGroupFragment( - MutableQueryGroupFragment.ResiliencyMode.ENFORCED, - Map.of(ResourceType.CPU, 0.10, ResourceType.MEMORY, 0.10) - ), - 1L - ); - QueryGroup spuQueryGroup = spy(testQueryGroup); - Set activeQueryGroups = new HashSet<>() { - { - add(spuQueryGroup); - } - }; - mockQueryGroupStateMap = new HashMap<>(); - QueryGroupState queryGroupState = new QueryGroupState(); - queryGroupState.getResourceState().get(ResourceType.CPU).setLastRecordedUsage(0.18); - 
queryGroupState.getResourceState().get(ResourceType.MEMORY).setLastRecordedUsage(0.18); - QueryGroupState spyState = spy(queryGroupState); - - mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(mockQueryGroupStateMap); - - mockQueryGroupStateMap.put("queryGroupId1", spyState); - - queryGroupService = new QueryGroupService( - mockCancellationService, - mockClusterService, - mockThreadPool, - mockWorkloadManagementSettings, - mockNodeDuressTrackers, - mockQueryGroupsStateAccessor, - activeQueryGroups, - new HashSet<>() - ); - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - assertThrows(OpenSearchRejectedExecutionException.class, () -> queryGroupService.rejectIfNeeded("queryGroupId1")); - - // verify the check to compare the current usage and limit - // this should happen 3 times => 1 to check whether the resource limit has the TRACKED resource type and 1 to get the value - // because it will break out of the loop since the limits are breached - verify(spuQueryGroup, times(2)).getResourceLimits(); - assertEquals( - 1, - queryGroupState.getResourceState().get(ResourceType.CPU).rejections.count() + queryGroupState.getResourceState() - .get(ResourceType.MEMORY).rejections.count() - ); - assertEquals(1, queryGroupState.totalRejections.count()); - } - - public void testRejectIfNeeded_whenFeatureIsNotEnabled() { - QueryGroup testQueryGroup = new QueryGroup( - "testQueryGroup", - "queryGroupId1", - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.10)), - 1L - ); - Set activeQueryGroups = new HashSet<>() { - { - add(testQueryGroup); - } - }; - mockQueryGroupStateMap = new HashMap<>(); - mockQueryGroupStateMap.put("queryGroupId1", new QueryGroupState()); - - Map spyMap = spy(mockQueryGroupStateMap); - - mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(mockQueryGroupStateMap); - - queryGroupService = new QueryGroupService( - mockCancellationService, - mockClusterService, - mockThreadPool, - mockWorkloadManagementSettings, - mockNodeDuressTrackers, - mockQueryGroupsStateAccessor, - activeQueryGroups, - new HashSet<>() - ); - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.DISABLED); - - queryGroupService.rejectIfNeeded(testQueryGroup.get_id()); - verify(spyMap, never()).get(any()); - } - - public void testOnTaskCompleted() { - Task task = new SearchTask(12, "", "", () -> "", null, null); - mockThreadPool = new TestThreadPool("queryGroupServiceTests"); - mockThreadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, "testId"); - QueryGroupState queryGroupState = new QueryGroupState(); - mockQueryGroupStateMap.put("testId", queryGroupState); - mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(mockQueryGroupStateMap); - queryGroupService = new QueryGroupService( - mockCancellationService, - mockClusterService, - mockThreadPool, - mockWorkloadManagementSettings, - mockNodeDuressTrackers, - mockQueryGroupsStateAccessor, - new HashSet<>() { - { - add( - new QueryGroup( - "testQueryGroup", - "testId", - new MutableQueryGroupFragment( - MutableQueryGroupFragment.ResiliencyMode.ENFORCED, - Map.of(ResourceType.CPU, 0.10, ResourceType.MEMORY, 0.10) - ), - 1L - ) - ); - } - }, - new HashSet<>() - ); - - ((QueryGroupTask) task).setQueryGroupId(mockThreadPool.getThreadContext()); - queryGroupService.onTaskCompleted(task); - - assertEquals(1, queryGroupState.totalCompletions.count()); - - // test non QueryGroupTask - task = new Task(1, "simple", "test", "mock 
task", null, null); - queryGroupService.onTaskCompleted(task); - - // It should still be 1 - assertEquals(1, queryGroupState.totalCompletions.count()); - - mockThreadPool.shutdown(); - } - - public void testShouldSBPHandle() { - SearchTask task = createMockTaskWithResourceStats(SearchTask.class, 100, 200, 0, 12); - QueryGroupState queryGroupState = new QueryGroupState(); - Set activeQueryGroups = new HashSet<>(); - mockQueryGroupStateMap.put("testId", queryGroupState); - mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(mockQueryGroupStateMap); - queryGroupService = new QueryGroupService( - mockCancellationService, - mockClusterService, - mockThreadPool, - mockWorkloadManagementSettings, - mockNodeDuressTrackers, - mockQueryGroupsStateAccessor, - activeQueryGroups, - Collections.emptySet() - ); - - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - - // Default queryGroupId - mockThreadPool = new TestThreadPool("queryGroupServiceTests"); - mockThreadPool.getThreadContext() - .putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get()); - // we haven't set the queryGroupId yet SBP should still track the task for cancellation - assertTrue(queryGroupService.shouldSBPHandle(task)); - task.setQueryGroupId(mockThreadPool.getThreadContext()); - assertTrue(queryGroupService.shouldSBPHandle(task)); - - mockThreadPool.shutdownNow(); - - // invalid queryGroup task - mockThreadPool = new TestThreadPool("queryGroupServiceTests"); - mockThreadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, "testId"); - task.setQueryGroupId(mockThreadPool.getThreadContext()); - assertTrue(queryGroupService.shouldSBPHandle(task)); - - // Valid query group task but wlm not enabled - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.DISABLED); - activeQueryGroups.add( - new QueryGroup( - "testQueryGroup", - "testId", - new MutableQueryGroupFragment( - MutableQueryGroupFragment.ResiliencyMode.ENFORCED, - Map.of(ResourceType.CPU, 0.10, ResourceType.MEMORY, 0.10) - ), - 1L - ) - ); - assertTrue(queryGroupService.shouldSBPHandle(task)); - - mockThreadPool.shutdownNow(); - - // test the case when SBP should not track the task - when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - task = new SearchTask(1, "", "test", () -> "", null, null); - mockThreadPool = new TestThreadPool("queryGroupServiceTests"); - mockThreadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, "testId"); - task.setQueryGroupId(mockThreadPool.getThreadContext()); - assertFalse(queryGroupService.shouldSBPHandle(task)); - } - - private static Set getActiveQueryGroups( - String name, - String id, - MutableQueryGroupFragment.ResiliencyMode mode, - Map resourceLimits - ) { - QueryGroup testQueryGroup = getQueryGroup(name, id, mode, resourceLimits); - Set activeQueryGroups = new HashSet<>() { - { - add(testQueryGroup); - } - }; - return activeQueryGroups; - } - - private static QueryGroup getQueryGroup( - String name, - String id, - MutableQueryGroupFragment.ResiliencyMode mode, - Map resourceLimits - ) { - QueryGroup testQueryGroup = new QueryGroup(name, id, new MutableQueryGroupFragment(mode, resourceLimits), 1L); - return testQueryGroup; - } - - // This is needed to test the behavior of QueryGroupService#doRun method - static class TestQueryGroupCancellationService extends QueryGroupTaskCancellationService { - public TestQueryGroupCancellationService( - WorkloadManagementSettings 
workloadManagementSettings, - TaskSelectionStrategy taskSelectionStrategy, - QueryGroupResourceUsageTrackerService resourceUsageTrackerService, - QueryGroupsStateAccessor queryGroupsStateAccessor, - Collection activeQueryGroups, - Collection deletedQueryGroups - ) { - super(workloadManagementSettings, taskSelectionStrategy, resourceUsageTrackerService, queryGroupsStateAccessor); - } - - @Override - public void cancelTasks( - BooleanSupplier isNodeInDuress, - Collection activeQueryGroups, - Collection deletedQueryGroups - ) { - - } - } -} diff --git a/server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java b/server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java deleted file mode 100644 index d292809c30124..0000000000000 --- a/server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.wlm; - -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; - -import java.util.Collections; - -import static org.opensearch.wlm.QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER; -import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; - -public class QueryGroupTaskTests extends OpenSearchTestCase { - private ThreadPool threadPool; - private QueryGroupTask sut; - - public void setUp() throws Exception { - super.setUp(); - threadPool = new TestThreadPool(getTestName()); - sut = new QueryGroupTask(123, "transport", "Search", "test task", null, Collections.emptyMap()); - } - - public void tearDown() throws Exception { - super.tearDown(); - threadPool.shutdown(); - } - - public void testSuccessfulSetQueryGroupId() { - sut.setQueryGroupId(threadPool.getThreadContext()); - assertEquals(DEFAULT_QUERY_GROUP_ID_SUPPLIER.get(), sut.getQueryGroupId()); - - threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, "akfanglkaglknag2332"); - - sut.setQueryGroupId(threadPool.getThreadContext()); - assertEquals("akfanglkaglknag2332", sut.getQueryGroupId()); - } -} diff --git a/server/src/test/java/org/opensearch/wlm/QueryGroupThreadContextStatePropagatorTests.java b/server/src/test/java/org/opensearch/wlm/QueryGroupThreadContextStatePropagatorTests.java deleted file mode 100644 index ad5d7f569a56e..0000000000000 --- a/server/src/test/java/org/opensearch/wlm/QueryGroupThreadContextStatePropagatorTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */
-
-package org.opensearch.wlm;
-
-import org.opensearch.test.OpenSearchTestCase;
-
-import java.util.Map;
-
-public class QueryGroupThreadContextStatePropagatorTests extends OpenSearchTestCase {
-
-    public void testTransients() {
-        QueryGroupThreadContextStatePropagator sut = new QueryGroupThreadContextStatePropagator();
-        Map<String, Object> source = Map.of("queryGroupId", "adgarja0r235te");
-        Map<String, Object> transients = sut.transients(source);
-        assertEquals("adgarja0r235te", transients.get("queryGroupId"));
-    }
-
-    public void testHeaders() {
-        QueryGroupThreadContextStatePropagator sut = new QueryGroupThreadContextStatePropagator();
-        Map<String, Object> source = Map.of("queryGroupId", "adgarja0r235te");
-        Map<String, String> headers = sut.headers(source);
-        assertEquals("adgarja0r235te", headers.get("queryGroupId"));
-    }
-}
diff --git a/server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadGroupLevelResourceUsageViewTests.java
similarity index 71%
rename from server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java
rename to server/src/test/java/org/opensearch/wlm/WorkloadGroupLevelResourceUsageViewTests.java
index 0c7eb721806d5..957fda7b6a09f 100644
--- a/server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java
+++ b/server/src/test/java/org/opensearch/wlm/WorkloadGroupLevelResourceUsageViewTests.java
@@ -14,15 +14,15 @@
 import java.util.List;
 import java.util.Map;
 
-import static org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService.MIN_VALUE;
+import static org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService.MIN_VALUE;
 import static org.opensearch.wlm.tracker.CpuUsageCalculator.PROCESSOR_COUNT;
 import static org.opensearch.wlm.tracker.MemoryUsageCalculator.HEAP_SIZE_BYTES;
 import static org.opensearch.wlm.tracker.ResourceUsageCalculatorTests.createMockTaskWithResourceStats;
 import static org.mockito.Mockito.mock;
 
-public class QueryGroupLevelResourceUsageViewTests extends OpenSearchTestCase {
+public class WorkloadGroupLevelResourceUsageViewTests extends OpenSearchTestCase {
     Map<ResourceType, Double> resourceUsage;
-    List<QueryGroupTask> activeTasks;
+    List<WorkloadGroupTask> activeTasks;
     ResourceUsageCalculatorTrackerServiceTests.TestClock clock;
     WorkloadManagementSettings settings;
@@ -30,7 +30,7 @@ public void setUp() throws Exception {
         super.setUp();
         settings = mock(WorkloadManagementSettings.class);
         clock = new ResourceUsageCalculatorTrackerServiceTests.TestClock();
-        activeTasks = List.of(createMockTaskWithResourceStats(QueryGroupTask.class, 100, 200, 0, 1));
+        activeTasks = List.of(createMockTaskWithResourceStats(WorkloadGroupTask.class, 100, 200, 0, 1));
         clock.fastForwardBy(300);
         double memoryUsage = 200.0 / HEAP_SIZE_BYTES;
         double cpuUsage = 100.0 / (PROCESSOR_COUNT * 300.0);
@@ -39,20 +39,20 @@ public void setUp() throws Exception {
     }
 
     public void testGetResourceUsageData() {
-        QueryGroupLevelResourceUsageView queryGroupLevelResourceUsageView = new QueryGroupLevelResourceUsageView(
+        WorkloadGroupLevelResourceUsageView workloadGroupLevelResourceUsageView = new WorkloadGroupLevelResourceUsageView(
             resourceUsage,
             activeTasks
         );
-        Map<ResourceType, Double> resourceUsageData = queryGroupLevelResourceUsageView.getResourceUsageData();
+        Map<ResourceType, Double> resourceUsageData = workloadGroupLevelResourceUsageView.getResourceUsageData();
         assertTrue(assertResourceUsageData(resourceUsageData));
     }
 
     public void testGetActiveTasks() {
-        QueryGroupLevelResourceUsageView queryGroupLevelResourceUsageView = new QueryGroupLevelResourceUsageView(
+        WorkloadGroupLevelResourceUsageView 
workloadGroupLevelResourceUsageView = new WorkloadGroupLevelResourceUsageView( resourceUsage, activeTasks ); - List activeTasks = queryGroupLevelResourceUsageView.getActiveTasks(); + List activeTasks = workloadGroupLevelResourceUsageView.getActiveTasks(); assertEquals(1, activeTasks.size()); assertEquals(1, activeTasks.get(0).getId()); } diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadGroupServiceTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadGroupServiceTests.java new file mode 100644 index 0000000000000..4784fe6cedf5a --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/WorkloadGroupServiceTests.java @@ -0,0 +1,553 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm; + +import org.opensearch.action.search.SearchTask; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.WorkloadGroup; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.search.backpressure.trackers.NodeDuressTrackers; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.wlm.cancellation.TaskSelectionStrategy; +import org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService; +import org.opensearch.wlm.stats.WorkloadGroupState; +import org.opensearch.wlm.tracker.WorkloadGroupResourceUsageTrackerService; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.BooleanSupplier; + +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +import static org.opensearch.wlm.tracker.ResourceUsageCalculatorTests.createMockTaskWithResourceStats; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class WorkloadGroupServiceTests extends OpenSearchTestCase { + public static final String WORKLOAD_GROUP_ID = "workloadGroupId1"; + private WorkloadGroupService workloadGroupService; + private WorkloadGroupTaskCancellationService mockCancellationService; + private ClusterService mockClusterService; + private ThreadPool mockThreadPool; + private WorkloadManagementSettings mockWorkloadManagementSettings; + private Scheduler.Cancellable mockScheduledFuture; + private Map mockWorkloadGroupStateMap; + NodeDuressTrackers mockNodeDuressTrackers; + WorkloadGroupsStateAccessor mockWorkloadGroupsStateAccessor; + + public void setUp() throws Exception { + super.setUp(); + mockClusterService = Mockito.mock(ClusterService.class); + mockThreadPool = Mockito.mock(ThreadPool.class); + mockScheduledFuture = Mockito.mock(Scheduler.Cancellable.class); + mockWorkloadManagementSettings = Mockito.mock(WorkloadManagementSettings.class); + mockWorkloadGroupStateMap 
= new HashMap<>(); + mockNodeDuressTrackers = Mockito.mock(NodeDuressTrackers.class); + mockCancellationService = Mockito.mock(TestWorkloadGroupCancellationService.class); + mockWorkloadGroupsStateAccessor = new WorkloadGroupsStateAccessor(); + when(mockNodeDuressTrackers.isNodeInDuress()).thenReturn(false); + + workloadGroupService = new WorkloadGroupService( + mockCancellationService, + mockClusterService, + mockThreadPool, + mockWorkloadManagementSettings, + mockNodeDuressTrackers, + mockWorkloadGroupsStateAccessor, + new HashSet<>(), + new HashSet<>() + ); + } + + public void tearDown() throws Exception { + super.tearDown(); + mockThreadPool.shutdown(); + } + + public void testClusterChanged() { + ClusterChangedEvent mockClusterChangedEvent = Mockito.mock(ClusterChangedEvent.class); + ClusterState mockPreviousClusterState = Mockito.mock(ClusterState.class); + ClusterState mockClusterState = Mockito.mock(ClusterState.class); + Metadata mockPreviousMetadata = Mockito.mock(Metadata.class); + Metadata mockMetadata = Mockito.mock(Metadata.class); + WorkloadGroup addedWorkloadGroup = new WorkloadGroup( + "addedWorkloadGroup", + "4242", + new MutableWorkloadGroupFragment(MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.5)), + 1L + ); + WorkloadGroup deletedWorkloadGroup = new WorkloadGroup( + "deletedWorkloadGroup", + "4241", + new MutableWorkloadGroupFragment(MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.5)), + 1L + ); + Map previousWorkloadGroups = new HashMap<>(); + previousWorkloadGroups.put("4242", addedWorkloadGroup); + Map currentWorkloadGroups = new HashMap<>(); + currentWorkloadGroups.put("4241", deletedWorkloadGroup); + + when(mockClusterChangedEvent.previousState()).thenReturn(mockPreviousClusterState); + when(mockClusterChangedEvent.state()).thenReturn(mockClusterState); + when(mockPreviousClusterState.metadata()).thenReturn(mockPreviousMetadata); + when(mockClusterState.metadata()).thenReturn(mockMetadata); + when(mockPreviousMetadata.workloadGroups()).thenReturn(previousWorkloadGroups); + when(mockMetadata.workloadGroups()).thenReturn(currentWorkloadGroups); + workloadGroupService.clusterChanged(mockClusterChangedEvent); + + Set currentWorkloadGroupsExpected = Set.of(currentWorkloadGroups.get("4241")); + Set previousWorkloadGroupsExpected = Set.of(previousWorkloadGroups.get("4242")); + + assertEquals(currentWorkloadGroupsExpected, workloadGroupService.getActiveWorkloadGroups()); + assertEquals(previousWorkloadGroupsExpected, workloadGroupService.getDeletedWorkloadGroups()); + } + + public void testDoStart_SchedulesTask() { + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + when(mockWorkloadManagementSettings.getWorkloadGroupServiceRunInterval()).thenReturn(TimeValue.timeValueSeconds(1)); + workloadGroupService.doStart(); + Mockito.verify(mockThreadPool).scheduleWithFixedDelay(any(Runnable.class), any(TimeValue.class), eq(ThreadPool.Names.GENERIC)); + } + + public void testDoStop_CancelsScheduledTask() { + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + when(mockThreadPool.scheduleWithFixedDelay(any(), any(), any())).thenReturn(mockScheduledFuture); + workloadGroupService.doStart(); + workloadGroupService.doStop(); + Mockito.verify(mockScheduledFuture).cancel(); + } + + public void testDoRun_WhenModeEnabled() { + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + 
when(mockNodeDuressTrackers.isNodeInDuress()).thenReturn(true); + // Call the method + workloadGroupService.doRun(); + + // Verify that refreshWorkloadGroups was called + + // Verify that cancelTasks was called with a BooleanSupplier + ArgumentCaptor booleanSupplierCaptor = ArgumentCaptor.forClass(BooleanSupplier.class); + Mockito.verify(mockCancellationService).cancelTasks(booleanSupplierCaptor.capture(), any(), any()); + + // Assert the behavior of the BooleanSupplier + BooleanSupplier capturedSupplier = booleanSupplierCaptor.getValue(); + assertTrue(capturedSupplier.getAsBoolean()); + + } + + public void testDoRun_WhenModeDisabled() { + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.DISABLED); + when(mockNodeDuressTrackers.isNodeInDuress()).thenReturn(false); + workloadGroupService.doRun(); + // Verify that refreshWorkloadGroups was called + + Mockito.verify(mockCancellationService, never()).cancelTasks(any(), any(), any()); + + } + + public void testRejectIfNeeded_whenWorkloadGroupIdIsNullOrDefaultOne() { + WorkloadGroup testWorkloadGroup = new WorkloadGroup( + "testWorkloadGroup", + "workloadGroupId1", + new MutableWorkloadGroupFragment(MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.10)), + 1L + ); + Set activeWorkloadGroups = new HashSet<>() { + { + add(testWorkloadGroup); + } + }; + mockWorkloadGroupStateMap = new HashMap<>(); + mockWorkloadGroupsStateAccessor = new WorkloadGroupsStateAccessor(mockWorkloadGroupStateMap); + mockWorkloadGroupStateMap.put("workloadGroupId1", new WorkloadGroupState()); + + Map spyMap = spy(mockWorkloadGroupStateMap); + + workloadGroupService = new WorkloadGroupService( + mockCancellationService, + mockClusterService, + mockThreadPool, + mockWorkloadManagementSettings, + mockNodeDuressTrackers, + mockWorkloadGroupsStateAccessor, + activeWorkloadGroups, + new HashSet<>() + ); + workloadGroupService.rejectIfNeeded(null); + + verify(spyMap, never()).get(any()); + + workloadGroupService.rejectIfNeeded(WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get()); + verify(spyMap, never()).get(any()); + } + + public void testRejectIfNeeded_whenSoftModeWorkloadGroupIsContendedAndNodeInDuress() { + Set activeWorkloadGroups = getActiveWorkloadGroups( + "testWorkloadGroup", + WORKLOAD_GROUP_ID, + MutableWorkloadGroupFragment.ResiliencyMode.SOFT, + Map.of(ResourceType.CPU, 0.10) + ); + mockWorkloadGroupStateMap = new HashMap<>(); + mockWorkloadGroupStateMap.put("workloadGroupId1", new WorkloadGroupState()); + WorkloadGroupState state = new WorkloadGroupState(); + WorkloadGroupState.ResourceTypeState cpuResourceState = new WorkloadGroupState.ResourceTypeState(ResourceType.CPU); + cpuResourceState.setLastRecordedUsage(0.10); + state.getResourceState().put(ResourceType.CPU, cpuResourceState); + WorkloadGroupState spyState = spy(state); + mockWorkloadGroupStateMap.put(WORKLOAD_GROUP_ID, spyState); + + mockWorkloadGroupsStateAccessor = new WorkloadGroupsStateAccessor(mockWorkloadGroupStateMap); + + workloadGroupService = new WorkloadGroupService( + mockCancellationService, + mockClusterService, + mockThreadPool, + mockWorkloadManagementSettings, + mockNodeDuressTrackers, + mockWorkloadGroupsStateAccessor, + activeWorkloadGroups, + new HashSet<>() + ); + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + when(mockNodeDuressTrackers.isNodeInDuress()).thenReturn(true); + assertThrows(OpenSearchRejectedExecutionException.class, () -> workloadGroupService.rejectIfNeeded("workloadGroupId1")); 
+ } + + public void testRejectIfNeeded_whenWorkloadGroupIsSoftMode() { + Set activeWorkloadGroups = getActiveWorkloadGroups( + "testWorkloadGroup", + WORKLOAD_GROUP_ID, + MutableWorkloadGroupFragment.ResiliencyMode.SOFT, + Map.of(ResourceType.CPU, 0.10) + ); + mockWorkloadGroupStateMap = new HashMap<>(); + WorkloadGroupState spyState = spy(new WorkloadGroupState()); + mockWorkloadGroupStateMap.put("workloadGroupId1", spyState); + + mockWorkloadGroupsStateAccessor = new WorkloadGroupsStateAccessor(mockWorkloadGroupStateMap); + + workloadGroupService = new WorkloadGroupService( + mockCancellationService, + mockClusterService, + mockThreadPool, + mockWorkloadManagementSettings, + mockNodeDuressTrackers, + mockWorkloadGroupsStateAccessor, + activeWorkloadGroups, + new HashSet<>() + ); + workloadGroupService.rejectIfNeeded("workloadGroupId1"); + + verify(spyState, never()).getResourceState(); + } + + public void testRejectIfNeeded_whenWorkloadGroupIsEnforcedMode_andNotBreaching() { + WorkloadGroup testWorkloadGroup = getWorkloadGroup( + "testWorkloadGroup", + "workloadGroupId1", + MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.10) + ); + WorkloadGroup spuWorkloadGroup = spy(testWorkloadGroup); + Set activeWorkloadGroups = new HashSet<>() { + { + add(spuWorkloadGroup); + } + }; + mockWorkloadGroupStateMap = new HashMap<>(); + WorkloadGroupState workloadGroupState = new WorkloadGroupState(); + workloadGroupState.getResourceState().get(ResourceType.CPU).setLastRecordedUsage(0.05); + + mockWorkloadGroupStateMap.put("workloadGroupId1", workloadGroupState); + + mockWorkloadGroupsStateAccessor = new WorkloadGroupsStateAccessor(mockWorkloadGroupStateMap); + + workloadGroupService = new WorkloadGroupService( + mockCancellationService, + mockClusterService, + mockThreadPool, + mockWorkloadManagementSettings, + mockNodeDuressTrackers, + mockWorkloadGroupsStateAccessor, + activeWorkloadGroups, + new HashSet<>() + ); + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + when(mockWorkloadManagementSettings.getNodeLevelCpuRejectionThreshold()).thenReturn(0.8); + workloadGroupService.rejectIfNeeded("workloadGroupId1"); + + // verify the check to compare the current usage and limit + // this should happen 3 times => 2 to check whether the resource limit has the TRACKED resource type and 1 to get the value + verify(spuWorkloadGroup, times(3)).getResourceLimits(); + assertEquals(0, workloadGroupState.getResourceState().get(ResourceType.CPU).rejections.count()); + assertEquals(0, workloadGroupState.totalRejections.count()); + } + + public void testRejectIfNeeded_whenWorkloadGroupIsEnforcedMode_andBreaching() { + WorkloadGroup testWorkloadGroup = new WorkloadGroup( + "testWorkloadGroup", + "workloadGroupId1", + new MutableWorkloadGroupFragment( + MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.10, ResourceType.MEMORY, 0.10) + ), + 1L + ); + WorkloadGroup spuWorkloadGroup = spy(testWorkloadGroup); + Set activeWorkloadGroups = new HashSet<>() { + { + add(spuWorkloadGroup); + } + }; + mockWorkloadGroupStateMap = new HashMap<>(); + WorkloadGroupState workloadGroupState = new WorkloadGroupState(); + workloadGroupState.getResourceState().get(ResourceType.CPU).setLastRecordedUsage(0.18); + workloadGroupState.getResourceState().get(ResourceType.MEMORY).setLastRecordedUsage(0.18); + WorkloadGroupState spyState = spy(workloadGroupState); + + mockWorkloadGroupsStateAccessor = new 
WorkloadGroupsStateAccessor(mockWorkloadGroupStateMap); + + mockWorkloadGroupStateMap.put("workloadGroupId1", spyState); + + workloadGroupService = new WorkloadGroupService( + mockCancellationService, + mockClusterService, + mockThreadPool, + mockWorkloadManagementSettings, + mockNodeDuressTrackers, + mockWorkloadGroupsStateAccessor, + activeWorkloadGroups, + new HashSet<>() + ); + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + assertThrows(OpenSearchRejectedExecutionException.class, () -> workloadGroupService.rejectIfNeeded("workloadGroupId1")); + + // verify the check to compare the current usage and limit + // this should happen 3 times => 1 to check whether the resource limit has the TRACKED resource type and 1 to get the value + // because it will break out of the loop since the limits are breached + verify(spuWorkloadGroup, times(2)).getResourceLimits(); + assertEquals( + 1, + workloadGroupState.getResourceState().get(ResourceType.CPU).rejections.count() + workloadGroupState.getResourceState() + .get(ResourceType.MEMORY).rejections.count() + ); + assertEquals(1, workloadGroupState.totalRejections.count()); + } + + public void testRejectIfNeeded_whenFeatureIsNotEnabled() { + WorkloadGroup testWorkloadGroup = new WorkloadGroup( + "testWorkloadGroup", + "workloadGroupId1", + new MutableWorkloadGroupFragment(MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.10)), + 1L + ); + Set activeWorkloadGroups = new HashSet<>() { + { + add(testWorkloadGroup); + } + }; + mockWorkloadGroupStateMap = new HashMap<>(); + mockWorkloadGroupStateMap.put("workloadGroupId1", new WorkloadGroupState()); + + Map spyMap = spy(mockWorkloadGroupStateMap); + + mockWorkloadGroupsStateAccessor = new WorkloadGroupsStateAccessor(mockWorkloadGroupStateMap); + + workloadGroupService = new WorkloadGroupService( + mockCancellationService, + mockClusterService, + mockThreadPool, + mockWorkloadManagementSettings, + mockNodeDuressTrackers, + mockWorkloadGroupsStateAccessor, + activeWorkloadGroups, + new HashSet<>() + ); + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.DISABLED); + + workloadGroupService.rejectIfNeeded(testWorkloadGroup.get_id()); + verify(spyMap, never()).get(any()); + } + + public void testOnTaskCompleted() { + Task task = new SearchTask(12, "", "", () -> "", null, null); + mockThreadPool = new TestThreadPool("workloadGroupServiceTests"); + mockThreadPool.getThreadContext().putHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER, "testId"); + WorkloadGroupState workloadGroupState = new WorkloadGroupState(); + mockWorkloadGroupStateMap.put("testId", workloadGroupState); + mockWorkloadGroupsStateAccessor = new WorkloadGroupsStateAccessor(mockWorkloadGroupStateMap); + workloadGroupService = new WorkloadGroupService( + mockCancellationService, + mockClusterService, + mockThreadPool, + mockWorkloadManagementSettings, + mockNodeDuressTrackers, + mockWorkloadGroupsStateAccessor, + new HashSet<>() { + { + add( + new WorkloadGroup( + "testWorkloadGroup", + "testId", + new MutableWorkloadGroupFragment( + MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.10, ResourceType.MEMORY, 0.10) + ), + 1L + ) + ); + } + }, + new HashSet<>() + ); + + ((WorkloadGroupTask) task).setWorkloadGroupId(mockThreadPool.getThreadContext()); + workloadGroupService.onTaskCompleted(task); + + assertEquals(1, workloadGroupState.totalCompletions.count()); + + // test non WorkloadGroupTask + task = new Task(1, "simple", "test", 
"mock task", null, null); + workloadGroupService.onTaskCompleted(task); + + // It should still be 1 + assertEquals(1, workloadGroupState.totalCompletions.count()); + + mockThreadPool.shutdown(); + } + + public void testShouldSBPHandle() { + SearchTask task = createMockTaskWithResourceStats(SearchTask.class, 100, 200, 0, 12); + WorkloadGroupState workloadGroupState = new WorkloadGroupState(); + Set activeWorkloadGroups = new HashSet<>(); + mockWorkloadGroupStateMap.put("testId", workloadGroupState); + mockWorkloadGroupsStateAccessor = new WorkloadGroupsStateAccessor(mockWorkloadGroupStateMap); + workloadGroupService = new WorkloadGroupService( + mockCancellationService, + mockClusterService, + mockThreadPool, + mockWorkloadManagementSettings, + mockNodeDuressTrackers, + mockWorkloadGroupsStateAccessor, + activeWorkloadGroups, + Collections.emptySet() + ); + + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + + // Default workloadGroupId + mockThreadPool = new TestThreadPool("workloadGroupServiceTests"); + mockThreadPool.getThreadContext() + .putHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER, WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get()); + // we haven't set the workloadGroupId yet SBP should still track the task for cancellation + assertTrue(workloadGroupService.shouldSBPHandle(task)); + task.setWorkloadGroupId(mockThreadPool.getThreadContext()); + assertTrue(workloadGroupService.shouldSBPHandle(task)); + + mockThreadPool.shutdownNow(); + + // invalid workloadGroup task + mockThreadPool = new TestThreadPool("workloadGroupServiceTests"); + mockThreadPool.getThreadContext().putHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER, "testId"); + task.setWorkloadGroupId(mockThreadPool.getThreadContext()); + assertTrue(workloadGroupService.shouldSBPHandle(task)); + + // Valid query group task but wlm not enabled + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.DISABLED); + activeWorkloadGroups.add( + new WorkloadGroup( + "testWorkloadGroup", + "testId", + new MutableWorkloadGroupFragment( + MutableWorkloadGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.10, ResourceType.MEMORY, 0.10) + ), + 1L + ) + ); + assertTrue(workloadGroupService.shouldSBPHandle(task)); + + mockThreadPool.shutdownNow(); + + // test the case when SBP should not track the task + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + task = new SearchTask(1, "", "test", () -> "", null, null); + mockThreadPool = new TestThreadPool("workloadGroupServiceTests"); + mockThreadPool.getThreadContext().putHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER, "testId"); + task.setWorkloadGroupId(mockThreadPool.getThreadContext()); + assertFalse(workloadGroupService.shouldSBPHandle(task)); + } + + private static Set getActiveWorkloadGroups( + String name, + String id, + MutableWorkloadGroupFragment.ResiliencyMode mode, + Map resourceLimits + ) { + WorkloadGroup testWorkloadGroup = getWorkloadGroup(name, id, mode, resourceLimits); + Set activeWorkloadGroups = new HashSet<>() { + { + add(testWorkloadGroup); + } + }; + return activeWorkloadGroups; + } + + private static WorkloadGroup getWorkloadGroup( + String name, + String id, + MutableWorkloadGroupFragment.ResiliencyMode mode, + Map resourceLimits + ) { + WorkloadGroup testWorkloadGroup = new WorkloadGroup(name, id, new MutableWorkloadGroupFragment(mode, resourceLimits), 1L); + return testWorkloadGroup; + } + + // This is needed to test the behavior of WorkloadGroupService#doRun 
method + static class TestWorkloadGroupCancellationService extends WorkloadGroupTaskCancellationService { + public TestWorkloadGroupCancellationService( + WorkloadManagementSettings workloadManagementSettings, + TaskSelectionStrategy taskSelectionStrategy, + WorkloadGroupResourceUsageTrackerService resourceUsageTrackerService, + WorkloadGroupsStateAccessor workloadGroupsStateAccessor, + Collection activeWorkloadGroups, + Collection deletedWorkloadGroups + ) { + super(workloadManagementSettings, taskSelectionStrategy, resourceUsageTrackerService, workloadGroupsStateAccessor); + } + + @Override + public void cancelTasks( + BooleanSupplier isNodeInDuress, + Collection activeWorkloadGroups, + Collection deletedWorkloadGroups + ) { + + } + } +} diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadGroupTaskTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadGroupTaskTests.java new file mode 100644 index 0000000000000..341f31993f800 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/WorkloadGroupTaskTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm; + +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Collections; + +import static org.opensearch.wlm.WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER; +import static org.opensearch.wlm.WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER; + +public class WorkloadGroupTaskTests extends OpenSearchTestCase { + private ThreadPool threadPool; + private WorkloadGroupTask sut; + + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getTestName()); + sut = new WorkloadGroupTask(123, "transport", "Search", "test task", null, Collections.emptyMap()); + } + + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testSuccessfulSetWorkloadGroupId() { + sut.setWorkloadGroupId(threadPool.getThreadContext()); + assertEquals(DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get(), sut.getWorkloadGroupId()); + + threadPool.getThreadContext().putHeader(WORKLOAD_GROUP_ID_HEADER, "akfanglkaglknag2332"); + + sut.setWorkloadGroupId(threadPool.getThreadContext()); + assertEquals("akfanglkaglknag2332", sut.getWorkloadGroupId()); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadGroupThreadContextStatePropagatorTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadGroupThreadContextStatePropagatorTests.java new file mode 100644 index 0000000000000..9337a28b4571e --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/WorkloadGroupThreadContextStatePropagatorTests.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class WorkloadGroupThreadContextStatePropagatorTests extends OpenSearchTestCase { + + public void testTransients() { + WorkloadGroupThreadContextStatePropagator sut = new WorkloadGroupThreadContextStatePropagator(); + Map source = Map.of("workloadGroupId", "adgarja0r235te"); + Map transients = sut.transients(source); + assertEquals("adgarja0r235te", transients.get("workloadGroupId")); + } + + public void testHeaders() { + WorkloadGroupThreadContextStatePropagator sut = new WorkloadGroupThreadContextStatePropagator(); + Map source = Map.of("workloadGroupId", "adgarja0r235te"); + Map headers = sut.headers(source); + assertEquals("adgarja0r235te", headers.get("workloadGroupId")); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadManagementSettingsTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadManagementSettingsTests.java index 0f183555781d3..ece6050653c45 100644 --- a/server/src/test/java/org/opensearch/wlm/WorkloadManagementSettingsTests.java +++ b/server/src/test/java/org/opensearch/wlm/WorkloadManagementSettingsTests.java @@ -20,8 +20,8 @@ public class WorkloadManagementSettingsTests extends OpenSearchTestCase { /** - * Tests the invalid value for {@code wlm.query_group.node.memory_rejection_threshold} - * When the value is set more than {@code wlm.query_group.node.memory_cancellation_threshold} accidentally during + * Tests the invalid value for {@code wlm.workload_group.node.memory_rejection_threshold} + * When the value is set more than {@code wlm.workload_group.node.memory_cancellation_threshold} accidentally during * new feature development. This test is to ensure that {@link WorkloadManagementSettings} holds the * invariant {@code nodeLevelRejectionThreshold < nodeLevelCancellationThreshold} */ @@ -36,8 +36,8 @@ public void testInvalidMemoryInstantiationOfWorkloadManagementSettings() { } /** - * Tests the invalid value for {@code wlm.query_group.node.cpu_rejection_threshold} - * When the value is set more than {@code wlm.query_group.node.cpu_cancellation_threshold} accidentally during + * Tests the invalid value for {@code wlm.workload_group.node.cpu_rejection_threshold} + * When the value is set more than {@code wlm.workload_group.node.cpu_cancellation_threshold} accidentally during * new feature development. 
This test is to ensure that {@link WorkloadManagementSettings} holds the * invariant {@code nodeLevelRejectionThreshold < nodeLevelCancellationThreshold} */ @@ -52,7 +52,7 @@ public void testInvalidCpuInstantiationOfWorkloadManagementSettings() { } /** - * Tests the valid value for {@code wlm.query_group.node.cpu_rejection_threshold} + * Tests the valid value for {@code wlm.workload_group.node.cpu_rejection_threshold} * Using setNodeLevelCpuRejectionThreshold function */ public void testValidNodeLevelCpuRejectionThresholdCase1() { @@ -64,7 +64,7 @@ public void testValidNodeLevelCpuRejectionThresholdCase1() { } /** - * Tests the valid value for {@code wlm.query_group.node.cpu_rejection_threshold} + * Tests the valid value for {@code wlm.workload_group.node.cpu_rejection_threshold} */ public void testValidNodeLevelCpuRejectionThresholdCase2() { Settings settings = Settings.builder().put(NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, 0.79).build(); @@ -74,7 +74,7 @@ public void testValidNodeLevelCpuRejectionThresholdCase2() { } /** - * Tests the invalid value for {@code wlm.query_group.node.cpu_rejection_threshold} + * Tests the invalid value for {@code wlm.workload_group.node.cpu_rejection_threshold} * When the value is set more than {@literal 0.9} */ public void testInvalidNodeLevelCpuRejectionThresholdCase1() { @@ -85,8 +85,8 @@ public void testInvalidNodeLevelCpuRejectionThresholdCase1() { } /** - * Tests the invalid value for {@code wlm.query_group.node.cpu_rejection_threshold} - * When the value is set more than {@code wlm.query_group.node.cpu_cancellation_threshold} + * Tests the invalid value for {@code wlm.workload_group.node.cpu_rejection_threshold} + * When the value is set more than {@code wlm.workload_group.node.cpu_cancellation_threshold} */ public void testInvalidNodeLevelCpuRejectionThresholdCase2() { Settings settings = Settings.builder() @@ -99,7 +99,7 @@ public void testInvalidNodeLevelCpuRejectionThresholdCase2() { } /** - * Tests the valid value for {@code wlm.query_group.node.cpu_cancellation_threshold} + * Tests the valid value for {@code wlm.workload_group.node.cpu_cancellation_threshold} */ public void testValidNodeLevelCpuCancellationThresholdCase1() { Settings settings = Settings.builder().put(NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME, 0.8).build(); @@ -109,7 +109,7 @@ public void testValidNodeLevelCpuCancellationThresholdCase1() { } /** - * Tests the valid value for {@code wlm.query_group.node.cpu_cancellation_threshold} + * Tests the valid value for {@code wlm.workload_group.node.cpu_cancellation_threshold} * Using setNodeLevelCpuCancellationThreshold function */ public void testValidNodeLevelCpuCancellationThresholdCase2() { @@ -121,7 +121,7 @@ public void testValidNodeLevelCpuCancellationThresholdCase2() { } /** - * Tests the invalid value for {@code wlm.query_group.node.cpu_cancellation_threshold} + * Tests the invalid value for {@code wlm.workload_group.node.cpu_cancellation_threshold} * When the value is set more than {@literal 0.95} */ public void testInvalidNodeLevelCpuCancellationThresholdCase1() { @@ -132,8 +132,8 @@ public void testInvalidNodeLevelCpuCancellationThresholdCase1() { } /** - * Tests the invalid value for {@code wlm.query_group.node.cpu_cancellation_threshold} - * When the value is set less than {@code wlm.query_group.node.cpu_rejection_threshold} + * Tests the invalid value for {@code wlm.workload_group.node.cpu_cancellation_threshold} + * When the value is set less than {@code wlm.workload_group.node.cpu_rejection_threshold} */ public void 
testInvalidNodeLevelCpuCancellationThresholdCase2() { Settings settings = Settings.builder() @@ -146,7 +146,7 @@ public void testInvalidNodeLevelCpuCancellationThresholdCase2() { } /** - * Tests the valid value for {@code wlm.query_group.node.memory_cancellation_threshold} + * Tests the valid value for {@code wlm.workload_group.node.memory_cancellation_threshold} */ public void testValidNodeLevelMemoryCancellationThresholdCase1() { Settings settings = Settings.builder().put(NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME, 0.8).build(); @@ -156,7 +156,7 @@ public void testValidNodeLevelMemoryCancellationThresholdCase1() { } /** - * Tests the valid value for {@code wlm.query_group.node.memory_cancellation_threshold} + * Tests the valid value for {@code wlm.workload_group.node.memory_cancellation_threshold} * Using setNodeLevelMemoryCancellationThreshold function */ public void testValidNodeLevelMemoryCancellationThresholdCase2() { @@ -168,7 +168,7 @@ public void testValidNodeLevelMemoryCancellationThresholdCase2() { } /** - * Tests the invalid value for {@code wlm.query_group.node.memory_cancellation_threshold} + * Tests the invalid value for {@code wlm.workload_group.node.memory_cancellation_threshold} * When the value is set more than {@literal 0.95} */ public void testInvalidNodeLevelMemoryCancellationThresholdCase1() { @@ -179,8 +179,8 @@ public void testInvalidNodeLevelMemoryCancellationThresholdCase1() { } /** - * Tests the invalid value for {@code wlm.query_group.node.memory_cancellation_threshold} - * When the value is set less than {@code wlm.query_group.node.memory_rejection_threshold} + * Tests the invalid value for {@code wlm.workload_group.node.memory_cancellation_threshold} + * When the value is set less than {@code wlm.workload_group.node.memory_rejection_threshold} */ public void testInvalidNodeLevelMemoryCancellationThresholdCase2() { Settings settings = Settings.builder() @@ -193,7 +193,7 @@ public void testInvalidNodeLevelMemoryCancellationThresholdCase2() { } /** - * Tests the valid value for {@code wlm.query_group.node.memory_rejection_threshold} + * Tests the valid value for {@code wlm.workload_group.node.memory_rejection_threshold} */ public void testValidNodeLevelMemoryRejectionThresholdCase1() { Settings settings = Settings.builder().put(NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME, 0.79).build(); @@ -203,7 +203,7 @@ public void testValidNodeLevelMemoryRejectionThresholdCase1() { } /** - * Tests the valid value for {@code wlm.query_group.node.memory_rejection_threshold} + * Tests the valid value for {@code wlm.workload_group.node.memory_rejection_threshold} * Using setNodeLevelMemoryRejectionThreshold function */ public void testValidNodeLevelMemoryRejectionThresholdCase2() { @@ -215,7 +215,7 @@ public void testValidNodeLevelMemoryRejectionThresholdCase2() { } /** - * Tests the invalid value for {@code wlm.query_group.node.memory_rejection_threshold} + * Tests the invalid value for {@code wlm.workload_group.node.memory_rejection_threshold} * When the value is set more than {@literal 0.9} */ public void testInvalidNodeLevelMemoryRejectionThresholdCase1() { @@ -226,8 +226,8 @@ public void testInvalidNodeLevelMemoryRejectionThresholdCase1() { } /** - * Tests the invalid value for {@code wlm.query_group.node.memory_rejection_threshold} - * When the value is set more than {@code wlm.query_group.node.memory_cancellation_threshold} + * Tests the invalid value for {@code wlm.workload_group.node.memory_rejection_threshold} + * When the value is set more than {@code 
wlm.workload_group.node.memory_cancellation_threshold} */ public void testInvalidNodeLevelMemoryRejectionThresholdCase2() { Settings settings = Settings.builder() diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java index d4cd7b79455a3..b0dd1221c264f 100644 --- a/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java +++ b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java @@ -17,7 +17,7 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; import org.opensearch.wlm.WorkloadManagementTransportInterceptor.RequestHandler; -import org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService; +import org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService; import java.util.Collections; @@ -26,31 +26,31 @@ import static org.mockito.Mockito.when; public class WorkloadManagementTransportInterceptorTests extends OpenSearchTestCase { - private QueryGroupTaskCancellationService mockTaskCancellationService; + private WorkloadGroupTaskCancellationService mockTaskCancellationService; private ClusterService mockClusterService; private ThreadPool mockThreadPool; private WorkloadManagementSettings mockWorkloadManagementSettings; private ThreadPool threadPool; private WorkloadManagementTransportInterceptor sut; - private QueryGroupsStateAccessor stateAccessor; + private WorkloadGroupsStateAccessor stateAccessor; public void setUp() throws Exception { super.setUp(); - mockTaskCancellationService = mock(QueryGroupTaskCancellationService.class); + mockTaskCancellationService = mock(WorkloadGroupTaskCancellationService.class); mockClusterService = mock(ClusterService.class); mockThreadPool = mock(ThreadPool.class); mockWorkloadManagementSettings = mock(WorkloadManagementSettings.class); threadPool = new TestThreadPool(getTestName()); - stateAccessor = new QueryGroupsStateAccessor(); + stateAccessor = new WorkloadGroupsStateAccessor(); ClusterState state = mock(ClusterState.class); Metadata metadata = mock(Metadata.class); when(mockClusterService.state()).thenReturn(state); when(state.metadata()).thenReturn(metadata); - when(metadata.queryGroups()).thenReturn(Collections.emptyMap()); + when(metadata.workloadGroups()).thenReturn(Collections.emptyMap()); sut = new WorkloadManagementTransportInterceptor( threadPool, - new QueryGroupService( + new WorkloadGroupService( mockTaskCancellationService, mockClusterService, mockThreadPool, diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java index 59818ad3dbbd2..e05aaf941c4e9 100644 --- a/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java +++ b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java @@ -30,7 +30,7 @@ public class WorkloadManagementTransportRequestHandlerTests extends OpenSearchTestCase { private WorkloadManagementTransportInterceptor.RequestHandler sut; private ThreadPool threadPool; - private QueryGroupService queryGroupService; + private WorkloadGroupService workloadGroupService; private TestTransportRequestHandler actualHandler; @@ -38,9 +38,9 @@ public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool(getTestName()); 
actualHandler = new TestTransportRequestHandler<>(); - queryGroupService = mock(QueryGroupService.class); + workloadGroupService = mock(WorkloadGroupService.class); - sut = new WorkloadManagementTransportInterceptor.RequestHandler<>(threadPool, actualHandler, queryGroupService); + sut = new WorkloadManagementTransportInterceptor.RequestHandler<>(threadPool, actualHandler, workloadGroupService); } public void tearDown() throws Exception { @@ -50,16 +50,16 @@ public void tearDown() throws Exception { public void testMessageReceivedForSearchWorkload_nonRejectionCase() throws Exception { ShardSearchRequest request = mock(ShardSearchRequest.class); - QueryGroupTask spyTask = getSpyTask(); - doNothing().when(queryGroupService).rejectIfNeeded(anyString()); + WorkloadGroupTask spyTask = getSpyTask(); + doNothing().when(workloadGroupService).rejectIfNeeded(anyString()); sut.messageReceived(request, mock(TransportChannel.class), spyTask); assertTrue(sut.isSearchWorkloadRequest(spyTask)); } public void testMessageReceivedForSearchWorkload_RejectionCase() throws Exception { ShardSearchRequest request = mock(ShardSearchRequest.class); - QueryGroupTask spyTask = getSpyTask(); - doThrow(OpenSearchRejectedExecutionException.class).when(queryGroupService).rejectIfNeeded(anyString()); + WorkloadGroupTask spyTask = getSpyTask(); + doThrow(OpenSearchRejectedExecutionException.class).when(workloadGroupService).rejectIfNeeded(anyString()); assertThrows(OpenSearchRejectedExecutionException.class, () -> sut.messageReceived(request, mock(TransportChannel.class), spyTask)); } @@ -72,8 +72,8 @@ public void testMessageReceivedForNonSearchWorkload() throws Exception { assertEquals(1, actualHandler.invokeCount); } - private static QueryGroupTask getSpyTask() { - final QueryGroupTask task = new QueryGroupTask(123, "transport", "Search", "test task", null, Collections.emptyMap()); + private static WorkloadGroupTask getSpyTask() { + final WorkloadGroupTask task = new WorkloadGroupTask(123, "transport", "Search", "test task", null, Collections.emptyMap()); return spy(task); } diff --git a/server/src/test/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategyTests.java b/server/src/test/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategyTests.java index dc79822c59c49..aa6e6f5c5a7df 100644 --- a/server/src/test/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategyTests.java @@ -15,15 +15,15 @@ import org.opensearch.core.tasks.resourcetracker.ResourceStatsType; import org.opensearch.core.tasks.resourcetracker.ResourceUsageMetric; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.wlm.QueryGroupTask; import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.stream.IntStream; -import static org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService.MIN_VALUE; +import static org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService.MIN_VALUE; import static org.opensearch.wlm.tracker.MemoryUsageCalculator.HEAP_SIZE_BYTES; public class MaximumResourceTaskSelectionStrategyTests extends OpenSearchTestCase { @@ -33,8 +33,8 @@ public void testSelectTasksToCancelSelectsTasksMeetingThreshold_ifReduceByIsGrea new MaximumResourceTaskSelectionStrategy(); double reduceBy = 50000.0 / HEAP_SIZE_BYTES; ResourceType 
resourceType = ResourceType.MEMORY; - List tasks = getListOfTasks(100); - List selectedTasks = testHighestResourceConsumingTaskFirstSelectionStrategy.selectTasksForCancellation( + List tasks = getListOfTasks(100); + List selectedTasks = testHighestResourceConsumingTaskFirstSelectionStrategy.selectTasksForCancellation( tasks, reduceBy, resourceType @@ -55,7 +55,7 @@ public void testSelectTasksToCancelSelectsTasksMeetingThreshold_ifReduceByIsLess new MaximumResourceTaskSelectionStrategy(); double reduceBy = -50.0 / HEAP_SIZE_BYTES; ResourceType resourceType = ResourceType.MEMORY; - List tasks = getListOfTasks(3); + List tasks = getListOfTasks(3); try { testHighestResourceConsumingTaskFirstSelectionStrategy.selectTasksForCancellation(tasks, reduceBy, resourceType); } catch (Exception e) { @@ -69,8 +69,8 @@ public void testSelectTasksToCancelSelectsTasksMeetingThreshold_ifReduceByIsEqua new MaximumResourceTaskSelectionStrategy(); double reduceBy = 0.0; ResourceType resourceType = ResourceType.MEMORY; - List tasks = getListOfTasks(50); - List selectedTasks = testHighestResourceConsumingTaskFirstSelectionStrategy.selectTasksForCancellation( + List tasks = getListOfTasks(50); + List selectedTasks = testHighestResourceConsumingTaskFirstSelectionStrategy.selectTasksForCancellation( tasks, reduceBy, resourceType @@ -78,9 +78,9 @@ public void testSelectTasksToCancelSelectsTasksMeetingThreshold_ifReduceByIsEqua assertTrue(selectedTasks.isEmpty()); } - private boolean tasksUsageMeetsThreshold(List selectedTasks, double threshold) { + private boolean tasksUsageMeetsThreshold(List selectedTasks, double threshold) { double memory = 0; - for (QueryGroupTask task : selectedTasks) { + for (WorkloadGroupTask task : selectedTasks) { memory += ResourceType.MEMORY.getResourceUsageCalculator().calculateTaskResourceUsage(task); if ((memory - threshold) > MIN_VALUE) { return true; @@ -89,12 +89,12 @@ private boolean tasksUsageMeetsThreshold(List selectedTasks, dou return false; } - private List getListOfTasks(int numberOfTasks) { - List tasks = new ArrayList<>(); + private List getListOfTasks(int numberOfTasks) { + List tasks = new ArrayList<>(); while (tasks.size() < numberOfTasks) { long id = randomLong(); - final QueryGroupTask task = getRandomSearchTask(id); + final WorkloadGroupTask task = getRandomSearchTask(id); long initial_memory = randomLongBetween(1, 100); ResourceUsageMetric[] initialTaskResourceMetrics = new ResourceUsageMetric[] { @@ -113,7 +113,7 @@ private List getListOfTasks(int numberOfTasks) { return tasks; } - private QueryGroupTask getRandomSearchTask(long id) { + private WorkloadGroupTask getRandomSearchTask(long id) { return new SearchTask( id, "transport", diff --git a/server/src/test/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationServiceTests.java b/server/src/test/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationServiceTests.java deleted file mode 100644 index 13e8e2c527073..0000000000000 --- a/server/src/test/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationServiceTests.java +++ /dev/null @@ -1,589 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.wlm.cancellation; - -import org.opensearch.action.search.SearchAction; -import org.opensearch.cluster.metadata.QueryGroup; -import org.opensearch.core.tasks.TaskId; -import org.opensearch.tasks.TaskCancellation; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.wlm.MutableQueryGroupFragment; -import org.opensearch.wlm.MutableQueryGroupFragment.ResiliencyMode; -import org.opensearch.wlm.QueryGroupLevelResourceUsageView; -import org.opensearch.wlm.QueryGroupTask; -import org.opensearch.wlm.QueryGroupsStateAccessor; -import org.opensearch.wlm.ResourceType; -import org.opensearch.wlm.WlmMode; -import org.opensearch.wlm.WorkloadManagementSettings; -import org.opensearch.wlm.stats.QueryGroupState; -import org.opensearch.wlm.tracker.QueryGroupResourceUsageTrackerService; -import org.opensearch.wlm.tracker.ResourceUsageCalculatorTrackerServiceTests.TestClock; -import org.junit.Before; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class QueryGroupTaskCancellationServiceTests extends OpenSearchTestCase { - private static final String queryGroupId1 = "queryGroup1"; - private static final String queryGroupId2 = "queryGroup2"; - - private TestClock clock; - - private Map queryGroupLevelViews; - private Set activeQueryGroups; - private Set deletedQueryGroups; - private QueryGroupTaskCancellationService taskCancellation; - private WorkloadManagementSettings workloadManagementSettings; - private QueryGroupResourceUsageTrackerService resourceUsageTrackerService; - private QueryGroupsStateAccessor stateAccessor; - - @Before - public void setup() { - workloadManagementSettings = mock(WorkloadManagementSettings.class); - queryGroupLevelViews = new HashMap<>(); - activeQueryGroups = new HashSet<>(); - deletedQueryGroups = new HashSet<>(); - - clock = new TestClock(); - when(workloadManagementSettings.getNodeLevelCpuCancellationThreshold()).thenReturn(0.9); - when(workloadManagementSettings.getNodeLevelMemoryCancellationThreshold()).thenReturn(0.9); - resourceUsageTrackerService = mock(QueryGroupResourceUsageTrackerService.class); - stateAccessor = mock(QueryGroupsStateAccessor.class); - when(stateAccessor.getQueryGroupState(any())).thenReturn(new QueryGroupState()); - taskCancellation = new QueryGroupTaskCancellationService( - workloadManagementSettings, - new MaximumResourceTaskSelectionStrategy(), - resourceUsageTrackerService, - stateAccessor - ); - } - - public void testGetCancellableTasksFrom_setupAppropriateCancellationReasonAndScore() { - ResourceType resourceType = ResourceType.CPU; - double cpuUsage = 0.11; - double memoryUsage = 0.0; - Double threshold = 0.1; - - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - clock.fastForwardBy(1000); - - QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); - when(mockView.getResourceUsageData()).thenReturn(Map.of(resourceType, cpuUsage, ResourceType.MEMORY, memoryUsage)); - queryGroupLevelViews.put(queryGroupId1, mockView); - taskCancellation.queryGroupLevelResourceUsageViews = 
queryGroupLevelViews; - - List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(queryGroup1)); - assertEquals(2, cancellableTasksFrom.size()); - assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); - assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); - assertEquals(1, cancellableTasksFrom.get(0).getReasons().get(0).getCancellationScore()); - } - - public void testGetCancellableTasksFrom_returnsTasksWhenBreachingThreshold() { - ResourceType resourceType = ResourceType.CPU; - double cpuUsage = 0.11; - double memoryUsage = 0.0; - Double threshold = 0.1; - - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); - when(mockView.getResourceUsageData()).thenReturn(Map.of(resourceType, cpuUsage, ResourceType.MEMORY, memoryUsage)); - queryGroupLevelViews.put(queryGroupId1, mockView); - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(queryGroup1)); - assertEquals(2, cancellableTasksFrom.size()); - assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); - assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); - } - - public void testGetCancellableTasksFrom_returnsTasksWhenBreachingThresholdForMemory() { - ResourceType resourceType = ResourceType.MEMORY; - double cpuUsage = 0.0; - double memoryUsage = 0.11; - Double threshold = 0.1; - - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); - when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, resourceType, memoryUsage)); - - queryGroupLevelViews.put(queryGroupId1, mockView); - activeQueryGroups.add(queryGroup1); - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED, activeQueryGroups); - assertEquals(2, cancellableTasksFrom.size()); - assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); - assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); - } - - public void testGetCancellableTasksFrom_returnsNoTasksWhenNotBreachingThreshold() { - ResourceType resourceType = ResourceType.CPU; - double cpuUsage = 0.81; - double memoryUsage = 0.0; - Double threshold = 0.9; - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); - when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, ResourceType.MEMORY, memoryUsage)); - queryGroupLevelViews.put(queryGroupId1, mockView); - activeQueryGroups.add(queryGroup1); - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(queryGroup1)); - assertTrue(cancellableTasksFrom.isEmpty()); - } - - public void testGetCancellableTasksFrom_filtersQueryGroupCorrectly() { - ResourceType resourceType = ResourceType.CPU; - double usage = 0.02; - Double 
threshold = 0.01; - - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); - queryGroupLevelViews.put(queryGroupId1, mockView); - activeQueryGroups.add(queryGroup1); - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - QueryGroupTaskCancellationService taskCancellation = new QueryGroupTaskCancellationService( - workloadManagementSettings, - new MaximumResourceTaskSelectionStrategy(), - resourceUsageTrackerService, - stateAccessor - ); - - List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.SOFT, activeQueryGroups); - assertEquals(0, cancellableTasksFrom.size()); - } - - public void testCancelTasks_cancelsGivenTasks() { - ResourceType resourceType = ResourceType.CPU; - double cpuUsage = 0.011; - double memoryUsage = 0.011; - - Double threshold = 0.01; - - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold, ResourceType.MEMORY, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); - when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, ResourceType.MEMORY, memoryUsage)); - - queryGroupLevelViews.put(queryGroupId1, mockView); - activeQueryGroups.add(queryGroup1); - - QueryGroupTaskCancellationService taskCancellation = new QueryGroupTaskCancellationService( - workloadManagementSettings, - new MaximumResourceTaskSelectionStrategy(), - resourceUsageTrackerService, - stateAccessor - ); - - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED, activeQueryGroups); - assertEquals(2, cancellableTasksFrom.size()); - assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); - assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); - - when(resourceUsageTrackerService.constructQueryGroupLevelUsageViews()).thenReturn(queryGroupLevelViews); - when(workloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - taskCancellation.cancelTasks(() -> false, activeQueryGroups, deletedQueryGroups); - assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); - assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); - } - - public void testCancelTasks_cancelsTasksFromDeletedQueryGroups() { - ResourceType resourceType = ResourceType.CPU; - double activeQueryGroupCpuUsage = 0.01; - double activeQueryGroupMemoryUsage = 0.0; - double deletedQueryGroupCpuUsage = 0.01; - double deletedQueryGroupMemoryUsage = 0.0; - Double threshold = 0.01; - - QueryGroup activeQueryGroup = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroup deletedQueryGroup = new QueryGroup( - "testQueryGroup", - queryGroupId2, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView1 = createResourceUsageViewMock(); - QueryGroupLevelResourceUsageView mockView2 = createResourceUsageViewMock( - resourceType, - deletedQueryGroupCpuUsage, - List.of(1000, 1001) - ); - - when(mockView1.getResourceUsageData()).thenReturn( - Map.of(ResourceType.CPU, 
activeQueryGroupCpuUsage, ResourceType.MEMORY, activeQueryGroupMemoryUsage) - ); - when(mockView2.getResourceUsageData()).thenReturn( - Map.of(ResourceType.CPU, deletedQueryGroupCpuUsage, ResourceType.MEMORY, deletedQueryGroupMemoryUsage) - ); - queryGroupLevelViews.put(queryGroupId1, mockView1); - queryGroupLevelViews.put(queryGroupId2, mockView2); - - activeQueryGroups.add(activeQueryGroup); - deletedQueryGroups.add(deletedQueryGroup); - - QueryGroupTaskCancellationService taskCancellation = new QueryGroupTaskCancellationService( - workloadManagementSettings, - new MaximumResourceTaskSelectionStrategy(), - resourceUsageTrackerService, - stateAccessor - ); - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED, activeQueryGroups); - assertEquals(2, cancellableTasksFrom.size()); - assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); - assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); - - List cancellableTasksFromDeletedQueryGroups = taskCancellation.getAllCancellableTasks(List.of(deletedQueryGroup)); - assertEquals(2, cancellableTasksFromDeletedQueryGroups.size()); - assertEquals(1000, cancellableTasksFromDeletedQueryGroups.get(0).getTask().getId()); - assertEquals(1001, cancellableTasksFromDeletedQueryGroups.get(1).getTask().getId()); - - when(resourceUsageTrackerService.constructQueryGroupLevelUsageViews()).thenReturn(queryGroupLevelViews); - when(workloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - taskCancellation.cancelTasks(() -> true, activeQueryGroups, deletedQueryGroups); - - assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); - assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); - assertTrue(cancellableTasksFromDeletedQueryGroups.get(0).getTask().isCancelled()); - assertTrue(cancellableTasksFromDeletedQueryGroups.get(1).getTask().isCancelled()); - } - - public void testCancelTasks_does_not_cancelTasksFromDeletedQueryGroups_whenNodeNotInDuress() { - ResourceType resourceType = ResourceType.CPU; - double activeQueryGroupCpuUsage = 0.11; - double activeQueryGroupMemoryUsage = 0.0; - double deletedQueryGroupCpuUsage = 0.11; - double deletedQueryGroupMemoryUsage = 0.0; - - Double threshold = 0.01; - - QueryGroup activeQueryGroup = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroup deletedQueryGroup = new QueryGroup( - "testQueryGroup", - queryGroupId2, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView1 = createResourceUsageViewMock(); - QueryGroupLevelResourceUsageView mockView2 = createResourceUsageViewMock( - resourceType, - deletedQueryGroupCpuUsage, - List.of(1000, 1001) - ); - - when(mockView1.getResourceUsageData()).thenReturn( - Map.of(ResourceType.CPU, activeQueryGroupCpuUsage, ResourceType.MEMORY, activeQueryGroupMemoryUsage) - ); - when(mockView2.getResourceUsageData()).thenReturn( - Map.of(ResourceType.CPU, deletedQueryGroupCpuUsage, ResourceType.MEMORY, deletedQueryGroupMemoryUsage) - ); - - queryGroupLevelViews.put(queryGroupId1, mockView1); - queryGroupLevelViews.put(queryGroupId2, mockView2); - activeQueryGroups.add(activeQueryGroup); - deletedQueryGroups.add(deletedQueryGroup); - - QueryGroupTaskCancellationService taskCancellation = new 
QueryGroupTaskCancellationService( - workloadManagementSettings, - new MaximumResourceTaskSelectionStrategy(), - resourceUsageTrackerService, - stateAccessor - ); - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED, activeQueryGroups); - assertEquals(2, cancellableTasksFrom.size()); - assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); - assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); - - List cancellableTasksFromDeletedQueryGroups = taskCancellation.getAllCancellableTasks(List.of(deletedQueryGroup)); - assertEquals(2, cancellableTasksFromDeletedQueryGroups.size()); - assertEquals(1000, cancellableTasksFromDeletedQueryGroups.get(0).getTask().getId()); - assertEquals(1001, cancellableTasksFromDeletedQueryGroups.get(1).getTask().getId()); - - when(resourceUsageTrackerService.constructQueryGroupLevelUsageViews()).thenReturn(queryGroupLevelViews); - when(workloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - taskCancellation.cancelTasks(() -> false, activeQueryGroups, deletedQueryGroups); - - assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); - assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); - assertFalse(cancellableTasksFromDeletedQueryGroups.get(0).getTask().isCancelled()); - assertFalse(cancellableTasksFromDeletedQueryGroups.get(1).getTask().isCancelled()); - } - - public void testCancelTasks_cancelsGivenTasks_WhenNodeInDuress() { - ResourceType resourceType = ResourceType.CPU; - double cpuUsage1 = 0.11; - double memoryUsage1 = 0.0; - double cpuUsage2 = 0.11; - double memoryUsage2 = 0.0; - Double threshold = 0.01; - - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroup queryGroup2 = new QueryGroup( - "testQueryGroup", - queryGroupId2, - new MutableQueryGroupFragment(ResiliencyMode.SOFT, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView1 = createResourceUsageViewMock(); - when(mockView1.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage1, ResourceType.MEMORY, memoryUsage1)); - queryGroupLevelViews.put(queryGroupId1, mockView1); - QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); - when(mockView.getActiveTasks()).thenReturn(List.of(getRandomSearchTask(5678), getRandomSearchTask(8765))); - when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage2, ResourceType.MEMORY, memoryUsage2)); - queryGroupLevelViews.put(queryGroupId2, mockView); - Collections.addAll(activeQueryGroups, queryGroup1, queryGroup2); - - QueryGroupTaskCancellationService taskCancellation = new QueryGroupTaskCancellationService( - workloadManagementSettings, - new MaximumResourceTaskSelectionStrategy(), - resourceUsageTrackerService, - stateAccessor - ); - - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED, activeQueryGroups); - assertEquals(2, cancellableTasksFrom.size()); - assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); - assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); - - List cancellableTasksFrom1 = taskCancellation.getAllCancellableTasks(ResiliencyMode.SOFT, activeQueryGroups); - assertEquals(2, 
cancellableTasksFrom1.size()); - assertEquals(5678, cancellableTasksFrom1.get(0).getTask().getId()); - assertEquals(8765, cancellableTasksFrom1.get(1).getTask().getId()); - - when(resourceUsageTrackerService.constructQueryGroupLevelUsageViews()).thenReturn(queryGroupLevelViews); - when(workloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); - taskCancellation.cancelTasks(() -> true, activeQueryGroups, deletedQueryGroups); - assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); - assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); - assertTrue(cancellableTasksFrom1.get(0).getTask().isCancelled()); - assertTrue(cancellableTasksFrom1.get(1).getTask().isCancelled()); - } - - public void testGetAllCancellableTasks_ReturnsNoTasksWhenNotBreachingThresholds() { - ResourceType resourceType = ResourceType.CPU; - double queryGroupCpuUsage = 0.09; - double queryGroupMemoryUsage = 0.0; - Double threshold = 0.1; - - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); - when(mockView.getResourceUsageData()).thenReturn( - Map.of(ResourceType.CPU, queryGroupCpuUsage, ResourceType.MEMORY, queryGroupMemoryUsage) - ); - queryGroupLevelViews.put(queryGroupId1, mockView); - activeQueryGroups.add(queryGroup1); - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - List allCancellableTasks = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED, activeQueryGroups); - assertTrue(allCancellableTasks.isEmpty()); - } - - public void testGetAllCancellableTasks_ReturnsTasksWhenBreachingThresholds() { - ResourceType resourceType = ResourceType.CPU; - double cpuUsage = 0.11; - double memoryUsage = 0.0; - Double threshold = 0.01; - - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); - when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, ResourceType.MEMORY, memoryUsage)); - queryGroupLevelViews.put(queryGroupId1, mockView); - activeQueryGroups.add(queryGroup1); - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - List allCancellableTasks = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED, activeQueryGroups); - assertEquals(2, allCancellableTasks.size()); - assertEquals(1234, allCancellableTasks.get(0).getTask().getId()); - assertEquals(4321, allCancellableTasks.get(1).getTask().getId()); - } - - public void testGetCancellableTasksFrom_doesNotReturnTasksWhenQueryGroupIdNotFound() { - ResourceType resourceType = ResourceType.CPU; - double usage = 0.11; - Double threshold = 0.01; - - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup1", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - QueryGroup queryGroup2 = new QueryGroup( - "testQueryGroup2", - queryGroupId2, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), - 1L - ); - - QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); - queryGroupLevelViews.put(queryGroupId1, mockView); - activeQueryGroups.add(queryGroup1); - activeQueryGroups.add(queryGroup2); - 
taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(queryGroup2)); - assertEquals(0, cancellableTasksFrom.size()); - } - - public void testPruneDeletedQueryGroups() { - QueryGroup queryGroup1 = new QueryGroup( - "testQueryGroup1", - queryGroupId1, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.2)), - 1L - ); - QueryGroup queryGroup2 = new QueryGroup( - "testQueryGroup2", - queryGroupId2, - new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.1)), - 1L - ); - List deletedQueryGroups = new ArrayList<>(); - deletedQueryGroups.add(queryGroup1); - deletedQueryGroups.add(queryGroup2); - QueryGroupLevelResourceUsageView resourceUsageView1 = createResourceUsageViewMock(); - - List activeTasks = IntStream.range(0, 5).mapToObj(this::getRandomSearchTask).collect(Collectors.toList()); - when(resourceUsageView1.getActiveTasks()).thenReturn(activeTasks); - - QueryGroupLevelResourceUsageView resourceUsageView2 = createResourceUsageViewMock(); - when(resourceUsageView2.getActiveTasks()).thenReturn(new ArrayList<>()); - - queryGroupLevelViews.put(queryGroupId1, resourceUsageView1); - queryGroupLevelViews.put(queryGroupId2, resourceUsageView2); - - QueryGroupTaskCancellationService taskCancellation = new QueryGroupTaskCancellationService( - workloadManagementSettings, - new MaximumResourceTaskSelectionStrategy(), - resourceUsageTrackerService, - stateAccessor - ); - taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; - - taskCancellation.pruneDeletedQueryGroups(deletedQueryGroups); - - assertEquals(1, deletedQueryGroups.size()); - assertEquals(queryGroupId1, deletedQueryGroups.get(0).get_id()); - - } - - private QueryGroupLevelResourceUsageView createResourceUsageViewMock() { - QueryGroupLevelResourceUsageView mockView = mock(QueryGroupLevelResourceUsageView.class); - when(mockView.getActiveTasks()).thenReturn(List.of(getRandomSearchTask(1234), getRandomSearchTask(4321))); - return mockView; - } - - private QueryGroupLevelResourceUsageView createResourceUsageViewMock(ResourceType resourceType, double usage, Collection ids) { - QueryGroupLevelResourceUsageView mockView = mock(QueryGroupLevelResourceUsageView.class); - when(mockView.getResourceUsageData()).thenReturn(Collections.singletonMap(resourceType, usage)); - when(mockView.getActiveTasks()).thenReturn(ids.stream().map(this::getRandomSearchTask).collect(Collectors.toList())); - return mockView; - } - - private QueryGroupTask getRandomSearchTask(long id) { - return new QueryGroupTask( - id, - "transport", - SearchAction.NAME, - "test description", - new TaskId(randomLong() + ":" + randomLong()), - Collections.emptyMap(), - null, - clock::getTime - ); - } -} diff --git a/server/src/test/java/org/opensearch/wlm/cancellation/WorkloadGroupTaskCancellationServiceTests.java b/server/src/test/java/org/opensearch/wlm/cancellation/WorkloadGroupTaskCancellationServiceTests.java new file mode 100644 index 0000000000000..9cef7f939df1d --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/cancellation/WorkloadGroupTaskCancellationServiceTests.java @@ -0,0 +1,612 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm.cancellation; + +import org.opensearch.action.search.SearchAction; +import org.opensearch.cluster.metadata.WorkloadGroup; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.wlm.MutableWorkloadGroupFragment; +import org.opensearch.wlm.MutableWorkloadGroupFragment.ResiliencyMode; +import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WlmMode; +import org.opensearch.wlm.WorkloadGroupLevelResourceUsageView; +import org.opensearch.wlm.WorkloadGroupTask; +import org.opensearch.wlm.WorkloadGroupsStateAccessor; +import org.opensearch.wlm.WorkloadManagementSettings; +import org.opensearch.wlm.stats.WorkloadGroupState; +import org.opensearch.wlm.tracker.ResourceUsageCalculatorTrackerServiceTests.TestClock; +import org.opensearch.wlm.tracker.WorkloadGroupResourceUsageTrackerService; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class WorkloadGroupTaskCancellationServiceTests extends OpenSearchTestCase { + private static final String workloadGroupId1 = "workloadGroup1"; + private static final String workloadGroupId2 = "workloadGroup2"; + + private TestClock clock; + + private Map workloadGroupLevelViews; + private Set activeWorkloadGroups; + private Set deletedWorkloadGroups; + private WorkloadGroupTaskCancellationService taskCancellation; + private WorkloadManagementSettings workloadManagementSettings; + private WorkloadGroupResourceUsageTrackerService resourceUsageTrackerService; + private WorkloadGroupsStateAccessor stateAccessor; + + @Before + public void setup() { + workloadManagementSettings = mock(WorkloadManagementSettings.class); + workloadGroupLevelViews = new HashMap<>(); + activeWorkloadGroups = new HashSet<>(); + deletedWorkloadGroups = new HashSet<>(); + + clock = new TestClock(); + when(workloadManagementSettings.getNodeLevelCpuCancellationThreshold()).thenReturn(0.9); + when(workloadManagementSettings.getNodeLevelMemoryCancellationThreshold()).thenReturn(0.9); + resourceUsageTrackerService = mock(WorkloadGroupResourceUsageTrackerService.class); + stateAccessor = mock(WorkloadGroupsStateAccessor.class); + when(stateAccessor.getWorkloadGroupState(any())).thenReturn(new WorkloadGroupState()); + taskCancellation = new WorkloadGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + stateAccessor + ); + } + + public void testGetCancellableTasksFrom_setupAppropriateCancellationReasonAndScore() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage = 0.11; + double memoryUsage = 0.0; + Double threshold = 0.1; + + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + clock.fastForwardBy(1000); + + WorkloadGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(resourceType, cpuUsage, ResourceType.MEMORY, memoryUsage)); + 
workloadGroupLevelViews.put(workloadGroupId1, mockView); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(workloadGroup1)); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + assertEquals(1, cancellableTasksFrom.get(0).getReasons().get(0).getCancellationScore()); + } + + public void testGetCancellableTasksFrom_returnsTasksWhenBreachingThreshold() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage = 0.11; + double memoryUsage = 0.0; + Double threshold = 0.1; + + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(resourceType, cpuUsage, ResourceType.MEMORY, memoryUsage)); + workloadGroupLevelViews.put(workloadGroupId1, mockView); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(workloadGroup1)); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + } + + public void testGetCancellableTasksFrom_returnsTasksWhenBreachingThresholdForMemory() { + ResourceType resourceType = ResourceType.MEMORY; + double cpuUsage = 0.0; + double memoryUsage = 0.11; + Double threshold = 0.1; + + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, resourceType, memoryUsage)); + + workloadGroupLevelViews.put(workloadGroupId1, mockView); + activeWorkloadGroups.add(workloadGroup1); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks( + ResiliencyMode.ENFORCED, + activeWorkloadGroups + ); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + } + + public void testGetCancellableTasksFrom_returnsNoTasksWhenNotBreachingThreshold() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage = 0.81; + double memoryUsage = 0.0; + Double threshold = 0.9; + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, ResourceType.MEMORY, memoryUsage)); + workloadGroupLevelViews.put(workloadGroupId1, mockView); + activeWorkloadGroups.add(workloadGroup1); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List cancellableTasksFrom = 
taskCancellation.getAllCancellableTasks(List.of(workloadGroup1)); + assertTrue(cancellableTasksFrom.isEmpty()); + } + + public void testGetCancellableTasksFrom_filtersWorkloadGroupCorrectly() { + ResourceType resourceType = ResourceType.CPU; + double usage = 0.02; + Double threshold = 0.01; + + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + workloadGroupLevelViews.put(workloadGroupId1, mockView); + activeWorkloadGroups.add(workloadGroup1); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + WorkloadGroupTaskCancellationService taskCancellation = new WorkloadGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + stateAccessor + ); + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.SOFT, activeWorkloadGroups); + assertEquals(0, cancellableTasksFrom.size()); + } + + public void testCancelTasks_cancelsGivenTasks() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage = 0.011; + double memoryUsage = 0.011; + + Double threshold = 0.01; + + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold, ResourceType.MEMORY, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, ResourceType.MEMORY, memoryUsage)); + + workloadGroupLevelViews.put(workloadGroupId1, mockView); + activeWorkloadGroups.add(workloadGroup1); + + WorkloadGroupTaskCancellationService taskCancellation = new WorkloadGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + stateAccessor + ); + + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks( + ResiliencyMode.ENFORCED, + activeWorkloadGroups + ); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + + when(resourceUsageTrackerService.constructWorkloadGroupLevelUsageViews()).thenReturn(workloadGroupLevelViews); + when(workloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + taskCancellation.cancelTasks(() -> false, activeWorkloadGroups, deletedWorkloadGroups); + assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); + } + + public void testCancelTasks_cancelsTasksFromDeletedWorkloadGroups() { + ResourceType resourceType = ResourceType.CPU; + double activeWorkloadGroupCpuUsage = 0.01; + double activeWorkloadGroupMemoryUsage = 0.0; + double deletedWorkloadGroupCpuUsage = 0.01; + double deletedWorkloadGroupMemoryUsage = 0.0; + Double threshold = 0.01; + + WorkloadGroup activeWorkloadGroup = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroup deletedWorkloadGroup = new WorkloadGroup( + 
"testWorkloadGroup", + workloadGroupId2, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView1 = createResourceUsageViewMock(); + WorkloadGroupLevelResourceUsageView mockView2 = createResourceUsageViewMock( + resourceType, + deletedWorkloadGroupCpuUsage, + List.of(1000, 1001) + ); + + when(mockView1.getResourceUsageData()).thenReturn( + Map.of(ResourceType.CPU, activeWorkloadGroupCpuUsage, ResourceType.MEMORY, activeWorkloadGroupMemoryUsage) + ); + when(mockView2.getResourceUsageData()).thenReturn( + Map.of(ResourceType.CPU, deletedWorkloadGroupCpuUsage, ResourceType.MEMORY, deletedWorkloadGroupMemoryUsage) + ); + workloadGroupLevelViews.put(workloadGroupId1, mockView1); + workloadGroupLevelViews.put(workloadGroupId2, mockView2); + + activeWorkloadGroups.add(activeWorkloadGroup); + deletedWorkloadGroups.add(deletedWorkloadGroup); + + WorkloadGroupTaskCancellationService taskCancellation = new WorkloadGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + stateAccessor + ); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks( + ResiliencyMode.ENFORCED, + activeWorkloadGroups + ); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + + List cancellableTasksFromDeletedWorkloadGroups = taskCancellation.getAllCancellableTasks( + List.of(deletedWorkloadGroup) + ); + assertEquals(2, cancellableTasksFromDeletedWorkloadGroups.size()); + assertEquals(1000, cancellableTasksFromDeletedWorkloadGroups.get(0).getTask().getId()); + assertEquals(1001, cancellableTasksFromDeletedWorkloadGroups.get(1).getTask().getId()); + + when(resourceUsageTrackerService.constructWorkloadGroupLevelUsageViews()).thenReturn(workloadGroupLevelViews); + when(workloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + taskCancellation.cancelTasks(() -> true, activeWorkloadGroups, deletedWorkloadGroups); + + assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); + assertTrue(cancellableTasksFromDeletedWorkloadGroups.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFromDeletedWorkloadGroups.get(1).getTask().isCancelled()); + } + + public void testCancelTasks_does_not_cancelTasksFromDeletedWorkloadGroups_whenNodeNotInDuress() { + ResourceType resourceType = ResourceType.CPU; + double activeWorkloadGroupCpuUsage = 0.11; + double activeWorkloadGroupMemoryUsage = 0.0; + double deletedWorkloadGroupCpuUsage = 0.11; + double deletedWorkloadGroupMemoryUsage = 0.0; + + Double threshold = 0.01; + + WorkloadGroup activeWorkloadGroup = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroup deletedWorkloadGroup = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId2, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView1 = createResourceUsageViewMock(); + WorkloadGroupLevelResourceUsageView mockView2 = createResourceUsageViewMock( + resourceType, + deletedWorkloadGroupCpuUsage, + List.of(1000, 
1001) + ); + + when(mockView1.getResourceUsageData()).thenReturn( + Map.of(ResourceType.CPU, activeWorkloadGroupCpuUsage, ResourceType.MEMORY, activeWorkloadGroupMemoryUsage) + ); + when(mockView2.getResourceUsageData()).thenReturn( + Map.of(ResourceType.CPU, deletedWorkloadGroupCpuUsage, ResourceType.MEMORY, deletedWorkloadGroupMemoryUsage) + ); + + workloadGroupLevelViews.put(workloadGroupId1, mockView1); + workloadGroupLevelViews.put(workloadGroupId2, mockView2); + activeWorkloadGroups.add(activeWorkloadGroup); + deletedWorkloadGroups.add(deletedWorkloadGroup); + + WorkloadGroupTaskCancellationService taskCancellation = new WorkloadGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + stateAccessor + ); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks( + ResiliencyMode.ENFORCED, + activeWorkloadGroups + ); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + + List cancellableTasksFromDeletedWorkloadGroups = taskCancellation.getAllCancellableTasks( + List.of(deletedWorkloadGroup) + ); + assertEquals(2, cancellableTasksFromDeletedWorkloadGroups.size()); + assertEquals(1000, cancellableTasksFromDeletedWorkloadGroups.get(0).getTask().getId()); + assertEquals(1001, cancellableTasksFromDeletedWorkloadGroups.get(1).getTask().getId()); + + when(resourceUsageTrackerService.constructWorkloadGroupLevelUsageViews()).thenReturn(workloadGroupLevelViews); + when(workloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + taskCancellation.cancelTasks(() -> false, activeWorkloadGroups, deletedWorkloadGroups); + + assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); + assertFalse(cancellableTasksFromDeletedWorkloadGroups.get(0).getTask().isCancelled()); + assertFalse(cancellableTasksFromDeletedWorkloadGroups.get(1).getTask().isCancelled()); + } + + public void testCancelTasks_cancelsGivenTasks_WhenNodeInDuress() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage1 = 0.11; + double memoryUsage1 = 0.0; + double cpuUsage2 = 0.11; + double memoryUsage2 = 0.0; + Double threshold = 0.01; + + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroup workloadGroup2 = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId2, + new MutableWorkloadGroupFragment(ResiliencyMode.SOFT, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView1 = createResourceUsageViewMock(); + when(mockView1.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage1, ResourceType.MEMORY, memoryUsage1)); + workloadGroupLevelViews.put(workloadGroupId1, mockView1); + WorkloadGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getActiveTasks()).thenReturn(List.of(getRandomSearchTask(5678), getRandomSearchTask(8765))); + when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage2, ResourceType.MEMORY, memoryUsage2)); + workloadGroupLevelViews.put(workloadGroupId2, mockView); + Collections.addAll(activeWorkloadGroups, workloadGroup1, workloadGroup2); + + 
WorkloadGroupTaskCancellationService taskCancellation = new WorkloadGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + stateAccessor + ); + + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks( + ResiliencyMode.ENFORCED, + activeWorkloadGroups + ); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + + List cancellableTasksFrom1 = taskCancellation.getAllCancellableTasks(ResiliencyMode.SOFT, activeWorkloadGroups); + assertEquals(2, cancellableTasksFrom1.size()); + assertEquals(5678, cancellableTasksFrom1.get(0).getTask().getId()); + assertEquals(8765, cancellableTasksFrom1.get(1).getTask().getId()); + + when(resourceUsageTrackerService.constructWorkloadGroupLevelUsageViews()).thenReturn(workloadGroupLevelViews); + when(workloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + taskCancellation.cancelTasks(() -> true, activeWorkloadGroups, deletedWorkloadGroups); + assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); + assertTrue(cancellableTasksFrom1.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFrom1.get(1).getTask().isCancelled()); + } + + public void testGetAllCancellableTasks_ReturnsNoTasksWhenNotBreachingThresholds() { + ResourceType resourceType = ResourceType.CPU; + double workloadGroupCpuUsage = 0.09; + double workloadGroupMemoryUsage = 0.0; + Double threshold = 0.1; + + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn( + Map.of(ResourceType.CPU, workloadGroupCpuUsage, ResourceType.MEMORY, workloadGroupMemoryUsage) + ); + workloadGroupLevelViews.put(workloadGroupId1, mockView); + activeWorkloadGroups.add(workloadGroup1); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List allCancellableTasks = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED, activeWorkloadGroups); + assertTrue(allCancellableTasks.isEmpty()); + } + + public void testGetAllCancellableTasks_ReturnsTasksWhenBreachingThresholds() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage = 0.11; + double memoryUsage = 0.0; + Double threshold = 0.01; + + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, ResourceType.MEMORY, memoryUsage)); + workloadGroupLevelViews.put(workloadGroupId1, mockView); + activeWorkloadGroups.add(workloadGroup1); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List allCancellableTasks = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED, activeWorkloadGroups); + assertEquals(2, allCancellableTasks.size()); + assertEquals(1234, 
allCancellableTasks.get(0).getTask().getId()); + assertEquals(4321, allCancellableTasks.get(1).getTask().getId()); + } + + public void testGetCancellableTasksFrom_doesNotReturnTasksWhenWorkloadGroupIdNotFound() { + ResourceType resourceType = ResourceType.CPU; + double usage = 0.11; + Double threshold = 0.01; + + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup1", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + WorkloadGroup workloadGroup2 = new WorkloadGroup( + "testWorkloadGroup2", + workloadGroupId2, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + WorkloadGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + workloadGroupLevelViews.put(workloadGroupId1, mockView); + activeWorkloadGroups.add(workloadGroup1); + activeWorkloadGroups.add(workloadGroup2); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(workloadGroup2)); + assertEquals(0, cancellableTasksFrom.size()); + } + + public void testPruneDeletedWorkloadGroups() { + WorkloadGroup workloadGroup1 = new WorkloadGroup( + "testWorkloadGroup1", + workloadGroupId1, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.2)), + 1L + ); + WorkloadGroup workloadGroup2 = new WorkloadGroup( + "testWorkloadGroup2", + workloadGroupId2, + new MutableWorkloadGroupFragment(ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.1)), + 1L + ); + List deletedWorkloadGroups = new ArrayList<>(); + deletedWorkloadGroups.add(workloadGroup1); + deletedWorkloadGroups.add(workloadGroup2); + WorkloadGroupLevelResourceUsageView resourceUsageView1 = createResourceUsageViewMock(); + + List activeTasks = IntStream.range(0, 5).mapToObj(this::getRandomSearchTask).collect(Collectors.toList()); + when(resourceUsageView1.getActiveTasks()).thenReturn(activeTasks); + + WorkloadGroupLevelResourceUsageView resourceUsageView2 = createResourceUsageViewMock(); + when(resourceUsageView2.getActiveTasks()).thenReturn(new ArrayList<>()); + + workloadGroupLevelViews.put(workloadGroupId1, resourceUsageView1); + workloadGroupLevelViews.put(workloadGroupId2, resourceUsageView2); + + WorkloadGroupTaskCancellationService taskCancellation = new WorkloadGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + stateAccessor + ); + taskCancellation.workloadGroupLevelResourceUsageViews = workloadGroupLevelViews; + + taskCancellation.pruneDeletedWorkloadGroups(deletedWorkloadGroups); + + assertEquals(1, deletedWorkloadGroups.size()); + assertEquals(workloadGroupId1, deletedWorkloadGroups.get(0).get_id()); + + } + + private WorkloadGroupLevelResourceUsageView createResourceUsageViewMock() { + WorkloadGroupLevelResourceUsageView mockView = mock(WorkloadGroupLevelResourceUsageView.class); + when(mockView.getActiveTasks()).thenReturn(List.of(getRandomSearchTask(1234), getRandomSearchTask(4321))); + return mockView; + } + + private WorkloadGroupLevelResourceUsageView createResourceUsageViewMock( + ResourceType resourceType, + double usage, + Collection ids + ) { + WorkloadGroupLevelResourceUsageView mockView = mock(WorkloadGroupLevelResourceUsageView.class); + when(mockView.getResourceUsageData()).thenReturn(Collections.singletonMap(resourceType, usage)); + 
when(mockView.getActiveTasks()).thenReturn(ids.stream().map(this::getRandomSearchTask).collect(Collectors.toList())); + return mockView; + } + + private WorkloadGroupTask getRandomSearchTask(long id) { + return new WorkloadGroupTask( + id, + "transport", + SearchAction.NAME, + "test description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap(), + null, + clock::getTime + ); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/listeners/QueryGroupRequestOperationListenerTests.java b/server/src/test/java/org/opensearch/wlm/listeners/WorkloadGroupRequestOperationListenerTests.java similarity index 53% rename from server/src/test/java/org/opensearch/wlm/listeners/QueryGroupRequestOperationListenerTests.java rename to server/src/test/java/org/opensearch/wlm/listeners/WorkloadGroupRequestOperationListenerTests.java index 016588acf1e24..9b76ba541a72f 100644 --- a/server/src/test/java/org/opensearch/wlm/listeners/QueryGroupRequestOperationListenerTests.java +++ b/server/src/test/java/org/opensearch/wlm/listeners/WorkloadGroupRequestOperationListenerTests.java @@ -16,14 +16,14 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.wlm.QueryGroupService; -import org.opensearch.wlm.QueryGroupTask; -import org.opensearch.wlm.QueryGroupsStateAccessor; import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadGroupService; +import org.opensearch.wlm.WorkloadGroupTask; +import org.opensearch.wlm.WorkloadGroupsStateAccessor; import org.opensearch.wlm.WorkloadManagementSettings; -import org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService; -import org.opensearch.wlm.stats.QueryGroupState; -import org.opensearch.wlm.stats.QueryGroupStats; +import org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService; +import org.opensearch.wlm.stats.WorkloadGroupState; +import org.opensearch.wlm.stats.WorkloadGroupStats; import java.io.IOException; import java.util.ArrayList; @@ -38,31 +38,31 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class QueryGroupRequestOperationListenerTests extends OpenSearchTestCase { +public class WorkloadGroupRequestOperationListenerTests extends OpenSearchTestCase { public static final int ITERATIONS = 20; ThreadPool testThreadPool; - QueryGroupService queryGroupService; - private QueryGroupTaskCancellationService taskCancellationService; + WorkloadGroupService workloadGroupService; + private WorkloadGroupTaskCancellationService taskCancellationService; private ClusterService mockClusterService; private WorkloadManagementSettings mockWorkloadManagementSettings; - Map queryGroupStateMap; - String testQueryGroupId; - QueryGroupRequestOperationListener sut; + Map workloadGroupStateMap; + String testWorkloadGroupId; + WorkloadGroupRequestOperationListener sut; public void setUp() throws Exception { super.setUp(); - taskCancellationService = mock(QueryGroupTaskCancellationService.class); + taskCancellationService = mock(WorkloadGroupTaskCancellationService.class); mockClusterService = mock(ClusterService.class); mockWorkloadManagementSettings = mock(WorkloadManagementSettings.class); - queryGroupStateMap = new HashMap<>(); - testQueryGroupId = "safjgagnakg-3r3fads"; + workloadGroupStateMap = new HashMap<>(); + testWorkloadGroupId = "safjgagnakg-3r3fads"; testThreadPool = new TestThreadPool("RejectionTestThreadPool"); ClusterState mockClusterState = 
mock(ClusterState.class); when(mockClusterService.state()).thenReturn(mockClusterState); Metadata mockMetaData = mock(Metadata.class); when(mockClusterState.metadata()).thenReturn(mockMetaData); - queryGroupService = mock(QueryGroupService.class); - sut = new QueryGroupRequestOperationListener(queryGroupService, testThreadPool); + workloadGroupService = mock(WorkloadGroupService.class); + sut = new WorkloadGroupRequestOperationListener(workloadGroupService, testThreadPool); } public void tearDown() throws Exception { @@ -71,62 +71,62 @@ public void tearDown() throws Exception { } public void testRejectionCase() { - final String testQueryGroupId = "asdgasgkajgkw3141_3rt4t"; - testThreadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, testQueryGroupId); - doThrow(OpenSearchRejectedExecutionException.class).when(queryGroupService).rejectIfNeeded(testQueryGroupId); + final String testWorkloadGroupId = "asdgasgkajgkw3141_3rt4t"; + testThreadPool.getThreadContext().putHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER, testWorkloadGroupId); + doThrow(OpenSearchRejectedExecutionException.class).when(workloadGroupService).rejectIfNeeded(testWorkloadGroupId); assertThrows(OpenSearchRejectedExecutionException.class, () -> sut.onRequestStart(null)); } public void testNonRejectionCase() { - final String testQueryGroupId = "asdgasgkajgkw3141_3rt4t"; - testThreadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, testQueryGroupId); - doNothing().when(queryGroupService).rejectIfNeeded(testQueryGroupId); + final String testWorkloadGroupId = "asdgasgkajgkw3141_3rt4t"; + testThreadPool.getThreadContext().putHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER, testWorkloadGroupId); + doNothing().when(workloadGroupService).rejectIfNeeded(testWorkloadGroupId); sut.onRequestStart(null); } - public void testValidQueryGroupRequestFailure() throws IOException { + public void testValidWorkloadGroupRequestFailure() throws IOException { - QueryGroupStats expectedStats = new QueryGroupStats( + WorkloadGroupStats expectedStats = new WorkloadGroupStats( Map.of( - testQueryGroupId, - new QueryGroupStats.QueryGroupStatsHolder( + testWorkloadGroupId, + new WorkloadGroupStats.WorkloadGroupStatsHolder( 0, 0, 1, 0, Map.of( ResourceType.CPU, - new QueryGroupStats.ResourceStats(0, 0, 0), + new WorkloadGroupStats.ResourceStats(0, 0, 0), ResourceType.MEMORY, - new QueryGroupStats.ResourceStats(0, 0, 0) + new WorkloadGroupStats.ResourceStats(0, 0, 0) ) ), - QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get(), - new QueryGroupStats.QueryGroupStatsHolder( + WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get(), + new WorkloadGroupStats.WorkloadGroupStatsHolder( 0, 0, 0, 0, Map.of( ResourceType.CPU, - new QueryGroupStats.ResourceStats(0, 0, 0), + new WorkloadGroupStats.ResourceStats(0, 0, 0), ResourceType.MEMORY, - new QueryGroupStats.ResourceStats(0, 0, 0) + new WorkloadGroupStats.ResourceStats(0, 0, 0) ) ) ) ); - assertSuccess(testQueryGroupId, queryGroupStateMap, expectedStats, testQueryGroupId); + assertSuccess(testWorkloadGroupId, workloadGroupStateMap, expectedStats, testWorkloadGroupId); } - public void testMultiThreadedValidQueryGroupRequestFailures() { + public void testMultiThreadedValidWorkloadGroupRequestFailures() { - queryGroupStateMap.put(testQueryGroupId, new QueryGroupState()); - QueryGroupsStateAccessor accessor = new QueryGroupsStateAccessor(queryGroupStateMap); - setupMockedQueryGroupsFromClusterState(); - queryGroupService = new QueryGroupService( + 
workloadGroupStateMap.put(testWorkloadGroupId, new WorkloadGroupState()); + WorkloadGroupsStateAccessor accessor = new WorkloadGroupsStateAccessor(workloadGroupStateMap); + setupMockedWorkloadGroupsFromClusterState(); + workloadGroupService = new WorkloadGroupService( taskCancellationService, mockClusterService, testThreadPool, @@ -137,13 +137,13 @@ public void testMultiThreadedValidQueryGroupRequestFailures() { Collections.emptySet() ); - sut = new QueryGroupRequestOperationListener(queryGroupService, testThreadPool); + sut = new WorkloadGroupRequestOperationListener(workloadGroupService, testThreadPool); List threads = new ArrayList<>(); for (int i = 0; i < ITERATIONS; i++) { threads.add(new Thread(() -> { try (ThreadContext.StoredContext currentContext = testThreadPool.getThreadContext().stashContext()) { - testThreadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, testQueryGroupId); + testThreadPool.getThreadContext().putHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER, testWorkloadGroupId); sut.onRequestFailure(null, null); } })); @@ -160,34 +160,34 @@ public void testMultiThreadedValidQueryGroupRequestFailures() { HashSet set = new HashSet<>(); set.add("_all"); - QueryGroupStats actualStats = queryGroupService.nodeStats(set, null); + WorkloadGroupStats actualStats = workloadGroupService.nodeStats(set, null); - QueryGroupStats expectedStats = new QueryGroupStats( + WorkloadGroupStats expectedStats = new WorkloadGroupStats( Map.of( - testQueryGroupId, - new QueryGroupStats.QueryGroupStatsHolder( + testWorkloadGroupId, + new WorkloadGroupStats.WorkloadGroupStatsHolder( 0, 0, ITERATIONS, 0, Map.of( ResourceType.CPU, - new QueryGroupStats.ResourceStats(0, 0, 0), + new WorkloadGroupStats.ResourceStats(0, 0, 0), ResourceType.MEMORY, - new QueryGroupStats.ResourceStats(0, 0, 0) + new WorkloadGroupStats.ResourceStats(0, 0, 0) ) ), - QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get(), - new QueryGroupStats.QueryGroupStatsHolder( + WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get(), + new WorkloadGroupStats.WorkloadGroupStatsHolder( 0, 0, 0, 0, Map.of( ResourceType.CPU, - new QueryGroupStats.ResourceStats(0, 0, 0), + new WorkloadGroupStats.ResourceStats(0, 0, 0), ResourceType.MEMORY, - new QueryGroupStats.ResourceStats(0, 0, 0) + new WorkloadGroupStats.ResourceStats(0, 0, 0) ) ) ) @@ -196,56 +196,56 @@ public void testMultiThreadedValidQueryGroupRequestFailures() { assertEquals(expectedStats, actualStats); } - public void testInvalidQueryGroupFailure() throws IOException { - QueryGroupStats expectedStats = new QueryGroupStats( + public void testInvalidWorkloadGroupFailure() throws IOException { + WorkloadGroupStats expectedStats = new WorkloadGroupStats( Map.of( - testQueryGroupId, - new QueryGroupStats.QueryGroupStatsHolder( + testWorkloadGroupId, + new WorkloadGroupStats.WorkloadGroupStatsHolder( 0, 0, 0, 0, Map.of( ResourceType.CPU, - new QueryGroupStats.ResourceStats(0, 0, 0), + new WorkloadGroupStats.ResourceStats(0, 0, 0), ResourceType.MEMORY, - new QueryGroupStats.ResourceStats(0, 0, 0) + new WorkloadGroupStats.ResourceStats(0, 0, 0) ) ), - QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get(), - new QueryGroupStats.QueryGroupStatsHolder( + WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get(), + new WorkloadGroupStats.WorkloadGroupStatsHolder( 0, 0, 1, 0, Map.of( ResourceType.CPU, - new QueryGroupStats.ResourceStats(0, 0, 0), + new WorkloadGroupStats.ResourceStats(0, 0, 0), ResourceType.MEMORY, - new QueryGroupStats.ResourceStats(0, 0, 0) + new 
WorkloadGroupStats.ResourceStats(0, 0, 0) ) ) ) ); - assertSuccess(testQueryGroupId, queryGroupStateMap, expectedStats, "dummy-invalid-qg-id"); + assertSuccess(testWorkloadGroupId, workloadGroupStateMap, expectedStats, "dummy-invalid-qg-id"); } private void assertSuccess( - String testQueryGroupId, - Map queryGroupStateMap, - QueryGroupStats expectedStats, + String testWorkloadGroupId, + Map workloadGroupStateMap, + WorkloadGroupStats expectedStats, String threadContextQG_Id ) { - QueryGroupsStateAccessor stateAccessor = new QueryGroupsStateAccessor(queryGroupStateMap); + WorkloadGroupsStateAccessor stateAccessor = new WorkloadGroupsStateAccessor(workloadGroupStateMap); try (ThreadContext.StoredContext currentContext = testThreadPool.getThreadContext().stashContext()) { - testThreadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, threadContextQG_Id); - queryGroupStateMap.put(testQueryGroupId, new QueryGroupState()); + testThreadPool.getThreadContext().putHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER, threadContextQG_Id); + workloadGroupStateMap.put(testWorkloadGroupId, new WorkloadGroupState()); - setupMockedQueryGroupsFromClusterState(); + setupMockedWorkloadGroupsFromClusterState(); - queryGroupService = new QueryGroupService( + workloadGroupService = new WorkloadGroupService( taskCancellationService, mockClusterService, testThreadPool, @@ -255,22 +255,22 @@ private void assertSuccess( Collections.emptySet(), Collections.emptySet() ); - sut = new QueryGroupRequestOperationListener(queryGroupService, testThreadPool); + sut = new WorkloadGroupRequestOperationListener(workloadGroupService, testThreadPool); sut.onRequestFailure(null, null); HashSet set = new HashSet<>(); set.add("_all"); - QueryGroupStats actualStats = queryGroupService.nodeStats(set, null); + WorkloadGroupStats actualStats = workloadGroupService.nodeStats(set, null); assertEquals(expectedStats, actualStats); } } - private void setupMockedQueryGroupsFromClusterState() { + private void setupMockedWorkloadGroupsFromClusterState() { ClusterState state = mock(ClusterState.class); Metadata metadata = mock(Metadata.class); when(mockClusterService.state()).thenReturn(state); when(state.metadata()).thenReturn(metadata); - when(metadata.queryGroups()).thenReturn(Collections.emptyMap()); + when(metadata.workloadGroups()).thenReturn(Collections.emptyMap()); } } diff --git a/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStateTests.java b/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStateTests.java deleted file mode 100644 index c0dfa06a0fba1..0000000000000 --- a/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStateTests.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.wlm.stats; - -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.wlm.ResourceType; - -import java.util.ArrayList; -import java.util.List; - -public class QueryGroupStateTests extends OpenSearchTestCase { - QueryGroupState queryGroupState; - - public void testRandomQueryGroupsStateUpdates() { - queryGroupState = new QueryGroupState(); - List updaterThreads = new ArrayList<>(); - - for (int i = 0; i < 25; i++) { - if (i % 5 == 0) { - updaterThreads.add(new Thread(() -> { queryGroupState.totalCompletions.inc(); })); - } else if (i % 5 == 1) { - updaterThreads.add(new Thread(() -> { - queryGroupState.totalRejections.inc(); - if (randomBoolean()) { - queryGroupState.getResourceState().get(ResourceType.CPU).rejections.inc(); - } else { - queryGroupState.getResourceState().get(ResourceType.MEMORY).rejections.inc(); - } - })); - } else if (i % 5 == 2) { - updaterThreads.add(new Thread(() -> queryGroupState.failures.inc())); - } else if (i % 5 == 3) { - updaterThreads.add(new Thread(() -> queryGroupState.getResourceState().get(ResourceType.CPU).cancellations.inc())); - } else { - updaterThreads.add(new Thread(() -> queryGroupState.getResourceState().get(ResourceType.MEMORY).cancellations.inc())); - } - - if (i % 5 == 3 || i % 5 == 4) { - updaterThreads.add(new Thread(() -> queryGroupState.totalCancellations.inc())); - } - } - - // trigger the updates - updaterThreads.forEach(Thread::start); - // wait for updates to be finished - updaterThreads.forEach(thread -> { - try { - thread.join(); - } catch (InterruptedException ignored) { - - } - }); - - assertEquals(5, queryGroupState.getTotalCompletions()); - assertEquals(5, queryGroupState.getTotalRejections()); - - final long sumOfRejectionsDueToResourceTypes = queryGroupState.getResourceState().get(ResourceType.CPU).rejections.count() - + queryGroupState.getResourceState().get(ResourceType.MEMORY).rejections.count(); - assertEquals(sumOfRejectionsDueToResourceTypes, queryGroupState.getTotalRejections()); - - assertEquals(5, queryGroupState.getFailures()); - assertEquals(10, queryGroupState.getTotalCancellations()); - assertEquals(5, queryGroupState.getResourceState().get(ResourceType.CPU).cancellations.count()); - assertEquals(5, queryGroupState.getResourceState().get(ResourceType.MEMORY).cancellations.count()); - } - -} diff --git a/server/src/test/java/org/opensearch/wlm/stats/WlmStatsTests.java b/server/src/test/java/org/opensearch/wlm/stats/WlmStatsTests.java index 6910ca7f9937c..5589db7c0c20d 100644 --- a/server/src/test/java/org/opensearch/wlm/stats/WlmStatsTests.java +++ b/server/src/test/java/org/opensearch/wlm/stats/WlmStatsTests.java @@ -30,26 +30,26 @@ public class WlmStatsTests extends AbstractWireSerializingTestCase { public void testToXContent() throws IOException { - final Map stats = new HashMap<>(); - final String queryGroupId = "afakjklaj304041-afaka"; + final Map stats = new HashMap<>(); + final String workloadGroupId = "afakjklaj304041-afaka"; stats.put( - queryGroupId, - new QueryGroupStats.QueryGroupStatsHolder( + workloadGroupId, + new WorkloadGroupStats.WorkloadGroupStatsHolder( 123456789, 13, 2, 0, - Map.of(ResourceType.CPU, new QueryGroupStats.ResourceStats(0.3, 13, 2)) + Map.of(ResourceType.CPU, new WorkloadGroupStats.ResourceStats(0.3, 13, 2)) ) ); XContentBuilder builder = JsonXContent.contentBuilder(); - QueryGroupStats queryGroupStats = new QueryGroupStats(stats); - WlmStats wlmStats = new WlmStats(mock(DiscoveryNode.class), queryGroupStats); + WorkloadGroupStats 
workloadGroupStats = new WorkloadGroupStats(stats); + WlmStats wlmStats = new WlmStats(mock(DiscoveryNode.class), workloadGroupStats); builder.startObject(); wlmStats.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); assertEquals( - "{\"query_groups\":{\"afakjklaj304041-afaka\":{\"total_completions\":123456789,\"total_rejections\":13,\"total_cancellations\":0,\"cpu\":{\"current_usage\":0.3,\"cancellations\":13,\"rejections\":2}}}}", + "{\"workload_groups\":{\"afakjklaj304041-afaka\":{\"total_completions\":123456789,\"total_rejections\":13,\"total_cancellations\":0,\"cpu\":{\"current_usage\":0.3,\"cancellations\":13,\"rejections\":2}}}}", builder.toString() ); } @@ -68,7 +68,7 @@ protected WlmStats createTestInstance() { DiscoveryNodeRole.BUILT_IN_ROLES, VersionUtils.randomCompatibleVersion(random(), Version.CURRENT) ); - QueryGroupStatsTests queryGroupStatsTests = new QueryGroupStatsTests(); - return new WlmStats(discoveryNode, queryGroupStatsTests.createTestInstance()); + WorkloadGroupStatsTests workloadGroupStatsTests = new WorkloadGroupStatsTests(); + return new WlmStats(discoveryNode, workloadGroupStatsTests.createTestInstance()); } } diff --git a/server/src/test/java/org/opensearch/wlm/stats/WorkloadGroupStateTests.java b/server/src/test/java/org/opensearch/wlm/stats/WorkloadGroupStateTests.java new file mode 100644 index 0000000000000..4aa70d17064e0 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/stats/WorkloadGroupStateTests.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm.stats; + +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.wlm.ResourceType; + +import java.util.ArrayList; +import java.util.List; + +public class WorkloadGroupStateTests extends OpenSearchTestCase { + WorkloadGroupState workloadGroupState; + + public void testRandomWorkloadGroupsStateUpdates() { + workloadGroupState = new WorkloadGroupState(); + List updaterThreads = new ArrayList<>(); + + for (int i = 0; i < 25; i++) { + if (i % 5 == 0) { + updaterThreads.add(new Thread(() -> { workloadGroupState.totalCompletions.inc(); })); + } else if (i % 5 == 1) { + updaterThreads.add(new Thread(() -> { + workloadGroupState.totalRejections.inc(); + if (randomBoolean()) { + workloadGroupState.getResourceState().get(ResourceType.CPU).rejections.inc(); + } else { + workloadGroupState.getResourceState().get(ResourceType.MEMORY).rejections.inc(); + } + })); + } else if (i % 5 == 2) { + updaterThreads.add(new Thread(() -> workloadGroupState.failures.inc())); + } else if (i % 5 == 3) { + updaterThreads.add(new Thread(() -> workloadGroupState.getResourceState().get(ResourceType.CPU).cancellations.inc())); + } else { + updaterThreads.add(new Thread(() -> workloadGroupState.getResourceState().get(ResourceType.MEMORY).cancellations.inc())); + } + + if (i % 5 == 3 || i % 5 == 4) { + updaterThreads.add(new Thread(() -> workloadGroupState.totalCancellations.inc())); + } + } + + // trigger the updates + updaterThreads.forEach(Thread::start); + // wait for updates to be finished + updaterThreads.forEach(thread -> { + try { + thread.join(); + } catch (InterruptedException ignored) { + + } + }); + + assertEquals(5, workloadGroupState.getTotalCompletions()); + assertEquals(5, workloadGroupState.getTotalRejections()); + + final long sumOfRejectionsDueToResourceTypes = 
workloadGroupState.getResourceState().get(ResourceType.CPU).rejections.count() + + workloadGroupState.getResourceState().get(ResourceType.MEMORY).rejections.count(); + assertEquals(sumOfRejectionsDueToResourceTypes, workloadGroupState.getTotalRejections()); + + assertEquals(5, workloadGroupState.getFailures()); + assertEquals(10, workloadGroupState.getTotalCancellations()); + assertEquals(5, workloadGroupState.getResourceState().get(ResourceType.CPU).cancellations.count()); + assertEquals(5, workloadGroupState.getResourceState().get(ResourceType.MEMORY).cancellations.count()); + } + +} diff --git a/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStatsTests.java b/server/src/test/java/org/opensearch/wlm/stats/WorkloadGroupStatsTests.java similarity index 64% rename from server/src/test/java/org/opensearch/wlm/stats/QueryGroupStatsTests.java rename to server/src/test/java/org/opensearch/wlm/stats/WorkloadGroupStatsTests.java index 6fc4d178e54bc..d7d77761aa9fa 100644 --- a/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStatsTests.java +++ b/server/src/test/java/org/opensearch/wlm/stats/WorkloadGroupStatsTests.java @@ -26,50 +26,50 @@ import static java.util.Collections.emptyMap; -public class QueryGroupStatsTests extends AbstractWireSerializingTestCase { +public class WorkloadGroupStatsTests extends AbstractWireSerializingTestCase { public void testToXContent() throws IOException { - final Map stats = new HashMap<>(); - final String queryGroupId = "afakjklaj304041-afaka"; + final Map stats = new HashMap<>(); + final String workloadGroupId = "afakjklaj304041-afaka"; stats.put( - queryGroupId, - new QueryGroupStats.QueryGroupStatsHolder( + workloadGroupId, + new WorkloadGroupStats.WorkloadGroupStatsHolder( 123456789, 13, 2, 0, - Map.of(ResourceType.CPU, new QueryGroupStats.ResourceStats(0.3, 13, 2)) + Map.of(ResourceType.CPU, new WorkloadGroupStats.ResourceStats(0.3, 13, 2)) ) ); XContentBuilder builder = JsonXContent.contentBuilder(); - QueryGroupStats queryGroupStats = new QueryGroupStats(stats); + WorkloadGroupStats workloadGroupStats = new WorkloadGroupStats(stats); builder.startObject(); - queryGroupStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + workloadGroupStats.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); assertEquals( - "{\"query_groups\":{\"afakjklaj304041-afaka\":{\"total_completions\":123456789,\"total_rejections\":13,\"total_cancellations\":0,\"cpu\":{\"current_usage\":0.3,\"cancellations\":13,\"rejections\":2}}}}", + "{\"workload_groups\":{\"afakjklaj304041-afaka\":{\"total_completions\":123456789,\"total_rejections\":13,\"total_cancellations\":0,\"cpu\":{\"current_usage\":0.3,\"cancellations\":13,\"rejections\":2}}}}", builder.toString() ); } @Override - protected Writeable.Reader instanceReader() { - return QueryGroupStats::new; + protected Writeable.Reader instanceReader() { + return WorkloadGroupStats::new; } @Override - protected QueryGroupStats createTestInstance() { - Map stats = new HashMap<>(); + protected WorkloadGroupStats createTestInstance() { + Map stats = new HashMap<>(); stats.put( randomAlphaOfLength(10), - new QueryGroupStats.QueryGroupStatsHolder( + new WorkloadGroupStats.WorkloadGroupStatsHolder( randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), Map.of( ResourceType.CPU, - new QueryGroupStats.ResourceStats( + new WorkloadGroupStats.ResourceStats( randomDoubleBetween(0.0, 0.90, false), randomNonNegativeLong(), randomNonNegativeLong() @@ -84,6 +84,6 @@ protected 
QueryGroupStats createTestInstance() { DiscoveryNodeRole.BUILT_IN_ROLES, VersionUtils.randomCompatibleVersion(random(), Version.CURRENT) ); - return new QueryGroupStats(stats); + return new WorkloadGroupStats(stats); } } diff --git a/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTests.java b/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTests.java index 21d9717a1aaca..ffaeca795f9f2 100644 --- a/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTests.java +++ b/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTests.java @@ -10,13 +10,13 @@ import org.opensearch.core.tasks.resourcetracker.ResourceStats; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.wlm.QueryGroupTask; import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadGroupTask; import org.opensearch.wlm.tracker.ResourceUsageCalculatorTrackerServiceTests.TestClock; import java.util.List; -import static org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService.MIN_VALUE; +import static org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService.MIN_VALUE; import static org.opensearch.wlm.tracker.CpuUsageCalculator.PROCESSOR_COUNT; import static org.opensearch.wlm.tracker.MemoryUsageCalculator.HEAP_SIZE_BYTES; import static org.mockito.Mockito.mock; @@ -24,24 +24,24 @@ public class ResourceUsageCalculatorTests extends OpenSearchTestCase { - public void testQueryGroupCpuUsage() { + public void testWorkloadGroupCpuUsage() { TestClock clock = new TestClock(); long fastForwardTime = PROCESSOR_COUNT * 200L; clock.fastForwardBy(fastForwardTime); - double expectedQueryGroupCpuUsage = 1.0 / PROCESSOR_COUNT; + double expectedWorkloadGroupCpuUsage = 1.0 / PROCESSOR_COUNT; - QueryGroupTask mockTask = createMockTaskWithResourceStats(QueryGroupTask.class, fastForwardTime, 200, 0, 123); + WorkloadGroupTask mockTask = createMockTaskWithResourceStats(WorkloadGroupTask.class, fastForwardTime, 200, 0, 123); when(mockTask.getElapsedTime()).thenReturn(fastForwardTime); double actualUsage = ResourceType.CPU.getResourceUsageCalculator().calculateResourceUsage(List.of(mockTask)); - assertEquals(expectedQueryGroupCpuUsage, actualUsage, MIN_VALUE); + assertEquals(expectedWorkloadGroupCpuUsage, actualUsage, MIN_VALUE); double taskResourceUsage = ResourceType.CPU.getResourceUsageCalculator().calculateTaskResourceUsage(mockTask); assertEquals(1.0, taskResourceUsage, MIN_VALUE); } - public void testQueryGroupMemoryUsage() { - QueryGroupTask mockTask = createMockTaskWithResourceStats(QueryGroupTask.class, 100, 200, 0, 123); + public void testWorkloadGroupMemoryUsage() { + WorkloadGroupTask mockTask = createMockTaskWithResourceStats(WorkloadGroupTask.class, 100, 200, 0, 123); double actualMemoryUsage = ResourceType.MEMORY.getResourceUsageCalculator().calculateResourceUsage(List.of(mockTask)); double expectedMemoryUsage = 200.0 / HEAP_SIZE_BYTES; @@ -53,7 +53,7 @@ public void testQueryGroupMemoryUsage() { ); } - public static T createMockTaskWithResourceStats( + public static T createMockTaskWithResourceStats( Class type, long cpuUsage, long heapUsage, diff --git a/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java b/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java index c14ac6a143c95..1eed6028392e2 100644 --- a/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java +++ 
b/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java @@ -16,9 +16,9 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.wlm.QueryGroupLevelResourceUsageView; -import org.opensearch.wlm.QueryGroupTask; import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadGroupLevelResourceUsageView; +import org.opensearch.wlm.WorkloadGroupTask; import org.opensearch.wlm.WorkloadManagementSettings; import org.junit.After; import org.junit.Before; @@ -29,8 +29,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; -import static org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService.MIN_VALUE; +import static org.opensearch.wlm.WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER; +import static org.opensearch.wlm.cancellation.WorkloadGroupTaskCancellationService.MIN_VALUE; import static org.opensearch.wlm.tracker.CpuUsageCalculator.PROCESSOR_COUNT; import static org.opensearch.wlm.tracker.MemoryUsageCalculator.HEAP_SIZE_BYTES; import static org.mockito.ArgumentMatchers.anyString; @@ -41,7 +41,7 @@ public class ResourceUsageCalculatorTrackerServiceTests extends OpenSearchTestCase { TestThreadPool threadPool; TaskResourceTrackingService mockTaskResourceTrackingService; - QueryGroupResourceUsageTrackerService queryGroupResourceUsageTrackerService; + WorkloadGroupResourceUsageTrackerService workloadGroupResourceUsageTrackerService; WorkloadManagementSettings settings; public static class TestClock { @@ -64,7 +64,7 @@ public void setup() { settings = mock(WorkloadManagementSettings.class); threadPool = new TestThreadPool(getTestName()); mockTaskResourceTrackingService = mock(TaskResourceTrackingService.class); - queryGroupResourceUsageTrackerService = new QueryGroupResourceUsageTrackerService(mockTaskResourceTrackingService); + workloadGroupResourceUsageTrackerService = new WorkloadGroupResourceUsageTrackerService(mockTaskResourceTrackingService); } @After @@ -72,81 +72,81 @@ public void cleanup() { ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); } - public void testConstructQueryGroupLevelViews_CreatesQueryGroupLevelUsageView_WhenTasksArePresent() { - List queryGroupIds = List.of("queryGroup1", "queryGroup2", "queryGroup3"); + public void testConstructWorkloadGroupLevelViews_CreatesWorkloadGroupLevelUsageView_WhenTasksArePresent() { + List workloadGroupIds = List.of("workloadGroup1", "workloadGroup2", "workloadGroup3"); clock.fastForwardBy(2000); - Map activeSearchShardTasks = createActiveSearchShardTasks(queryGroupIds); + Map activeSearchShardTasks = createActiveSearchShardTasks(workloadGroupIds); when(mockTaskResourceTrackingService.getResourceAwareTasks()).thenReturn(activeSearchShardTasks); - Map stringQueryGroupLevelResourceUsageViewMap = queryGroupResourceUsageTrackerService - .constructQueryGroupLevelUsageViews(); + Map stringWorkloadGroupLevelResourceUsageViewMap = + workloadGroupResourceUsageTrackerService.constructWorkloadGroupLevelUsageViews(); - for (String queryGroupId : queryGroupIds) { + for (String workloadGroupId : workloadGroupIds) { assertEquals( (400 * 1.0f) / HEAP_SIZE_BYTES, - stringQueryGroupLevelResourceUsageViewMap.get(queryGroupId).getResourceUsageData().get(ResourceType.MEMORY), + 
stringWorkloadGroupLevelResourceUsageViewMap.get(workloadGroupId).getResourceUsageData().get(ResourceType.MEMORY), MIN_VALUE ); assertEquals( (200 * 1.0f) / (PROCESSOR_COUNT * 2000), - stringQueryGroupLevelResourceUsageViewMap.get(queryGroupId).getResourceUsageData().get(ResourceType.CPU), + stringWorkloadGroupLevelResourceUsageViewMap.get(workloadGroupId).getResourceUsageData().get(ResourceType.CPU), MIN_VALUE ); - assertEquals(2, stringQueryGroupLevelResourceUsageViewMap.get(queryGroupId).getActiveTasks().size()); + assertEquals(2, stringWorkloadGroupLevelResourceUsageViewMap.get(workloadGroupId).getActiveTasks().size()); } } - public void testConstructQueryGroupLevelViews_CreatesQueryGroupLevelUsageView_WhenTasksAreNotPresent() { - Map stringQueryGroupLevelResourceUsageViewMap = queryGroupResourceUsageTrackerService - .constructQueryGroupLevelUsageViews(); - assertTrue(stringQueryGroupLevelResourceUsageViewMap.isEmpty()); + public void testConstructWorkloadGroupLevelViews_CreatesWorkloadGroupLevelUsageView_WhenTasksAreNotPresent() { + Map stringWorkloadGroupLevelResourceUsageViewMap = + workloadGroupResourceUsageTrackerService.constructWorkloadGroupLevelUsageViews(); + assertTrue(stringWorkloadGroupLevelResourceUsageViewMap.isEmpty()); } - public void testConstructQueryGroupLevelUsageViews_WithTasksHavingDifferentResourceUsage() { + public void testConstructWorkloadGroupLevelUsageViews_WithTasksHavingDifferentResourceUsage() { Map activeSearchShardTasks = new HashMap<>(); clock.fastForwardBy(2000); - activeSearchShardTasks.put(1L, createMockTask(SearchShardTask.class, 100, 200, "queryGroup1")); - activeSearchShardTasks.put(2L, createMockTask(SearchShardTask.class, 200, 400, "queryGroup1")); + activeSearchShardTasks.put(1L, createMockTask(SearchShardTask.class, 100, 200, "workloadGroup1")); + activeSearchShardTasks.put(2L, createMockTask(SearchShardTask.class, 200, 400, "workloadGroup1")); when(mockTaskResourceTrackingService.getResourceAwareTasks()).thenReturn(activeSearchShardTasks); - Map queryGroupViews = queryGroupResourceUsageTrackerService - .constructQueryGroupLevelUsageViews(); + Map workloadGroupViews = workloadGroupResourceUsageTrackerService + .constructWorkloadGroupLevelUsageViews(); assertEquals( (double) 600 / HEAP_SIZE_BYTES, - queryGroupViews.get("queryGroup1").getResourceUsageData().get(ResourceType.MEMORY), + workloadGroupViews.get("workloadGroup1").getResourceUsageData().get(ResourceType.MEMORY), MIN_VALUE ); assertEquals( ((double) 300) / (PROCESSOR_COUNT * 2000), - queryGroupViews.get("queryGroup1").getResourceUsageData().get(ResourceType.CPU), + workloadGroupViews.get("workloadGroup1").getResourceUsageData().get(ResourceType.CPU), MIN_VALUE ); - assertEquals(2, queryGroupViews.get("queryGroup1").getActiveTasks().size()); + assertEquals(2, workloadGroupViews.get("workloadGroup1").getActiveTasks().size()); } - private Map createActiveSearchShardTasks(List queryGroupIds) { + private Map createActiveSearchShardTasks(List workloadGroupIds) { Map activeSearchShardTasks = new HashMap<>(); long task_id = 0; - for (String queryGroupId : queryGroupIds) { + for (String workloadGroupId : workloadGroupIds) { for (int i = 0; i < 2; i++) { - activeSearchShardTasks.put(++task_id, createMockTask(SearchShardTask.class, 100, 200, queryGroupId)); + activeSearchShardTasks.put(++task_id, createMockTask(SearchShardTask.class, 100, 200, workloadGroupId)); } } return activeSearchShardTasks; } - private T createMockTask(Class type, long cpuUsage, long heapUsage, String queryGroupId) { + 
private T createMockTask(Class type, long cpuUsage, long heapUsage, String workloadGroupId) { T task = mock(type); try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) { - threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, queryGroupId); - task.setQueryGroupId(threadPool.getThreadContext()); + threadPool.getThreadContext().putHeader(WORKLOAD_GROUP_ID_HEADER, workloadGroupId); + task.setWorkloadGroupId(threadPool.getThreadContext()); } when(task.getTotalResourceUtilization(ResourceStats.CPU)).thenReturn(cpuUsage); when(task.getTotalResourceUtilization(ResourceStats.MEMORY)).thenReturn(heapUsage); when(task.getStartTimeNanos()).thenReturn((long) 0); when(task.getElapsedTime()).thenReturn(clock.getTime()); - when(task.isQueryGroupSet()).thenReturn(true); + when(task.isWorkloadGroupSet()).thenReturn(true); AtomicBoolean isCancelled = new AtomicBoolean(false); doAnswer(invocation -> { diff --git a/server/src/test/java/org/opensearch/wlm/tracker/QueryGroupTaskResourceTrackingTests.java b/server/src/test/java/org/opensearch/wlm/tracker/WorkloadGroupTaskResourceTrackingTests.java similarity index 61% rename from server/src/test/java/org/opensearch/wlm/tracker/QueryGroupTaskResourceTrackingTests.java rename to server/src/test/java/org/opensearch/wlm/tracker/WorkloadGroupTaskResourceTrackingTests.java index 5d54de3536596..123352c6f67a5 100644 --- a/server/src/test/java/org/opensearch/wlm/tracker/QueryGroupTaskResourceTrackingTests.java +++ b/server/src/test/java/org/opensearch/wlm/tracker/WorkloadGroupTaskResourceTrackingTests.java @@ -17,15 +17,15 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.wlm.QueryGroupLevelResourceUsageView; -import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.WorkloadGroupLevelResourceUsageView; +import org.opensearch.wlm.WorkloadGroupTask; import java.util.HashMap; import java.util.Map; -public class QueryGroupTaskResourceTrackingTests extends OpenSearchTestCase { +public class WorkloadGroupTaskResourceTrackingTests extends OpenSearchTestCase { ThreadPool threadPool; - QueryGroupResourceUsageTrackerService queryGroupResourceUsageTrackerService; + WorkloadGroupResourceUsageTrackerService workloadGroupResourceUsageTrackerService; TaskResourceTrackingService taskResourceTrackingService; @Override @@ -37,7 +37,7 @@ public void setUp() throws Exception { new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); - queryGroupResourceUsageTrackerService = new QueryGroupResourceUsageTrackerService(taskResourceTrackingService); + workloadGroupResourceUsageTrackerService = new WorkloadGroupResourceUsageTrackerService(taskResourceTrackingService); } public void tearDown() throws Exception { @@ -45,21 +45,21 @@ public void tearDown() throws Exception { threadPool.shutdownNow(); } - public void testValidQueryGroupTasksCase() { + public void testValidWorkloadGroupTasksCase() { taskResourceTrackingService.setTaskResourceTrackingEnabled(true); - QueryGroupTask task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); + WorkloadGroupTask task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); taskResourceTrackingService.startTracking(task); // since the query group id is not set we should not track this task - Map resourceUsageViewMap = queryGroupResourceUsageTrackerService - 
.constructQueryGroupLevelUsageViews(); + Map resourceUsageViewMap = workloadGroupResourceUsageTrackerService + .constructWorkloadGroupLevelUsageViews(); assertTrue(resourceUsageViewMap.isEmpty()); - // Now since this task has a valid queryGroupId header it should be tracked + // Now since this task has a valid workloadGroupId header it should be tracked try (ThreadContext.StoredContext context = threadPool.getThreadContext().stashContext()) { - threadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, "testHeader"); - task.setQueryGroupId(threadPool.getThreadContext()); - resourceUsageViewMap = queryGroupResourceUsageTrackerService.constructQueryGroupLevelUsageViews(); + threadPool.getThreadContext().putHeader(WorkloadGroupTask.WORKLOAD_GROUP_ID_HEADER, "testHeader"); + task.setWorkloadGroupId(threadPool.getThreadContext()); + resourceUsageViewMap = workloadGroupResourceUsageTrackerService.constructWorkloadGroupLevelUsageViews(); assertFalse(resourceUsageViewMap.isEmpty()); } } From 07cb4c9f17cfa86924e0457968cdd69d9ad84572 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Fri, 11 Apr 2025 12:12:20 -0700 Subject: [PATCH 202/550] Add the base FlightProducer for getStream API (#17446) * make use of extendedPlugins provide runtime dependencies for arrow-memrory-core Signed-off-by: Rishabh Maurya * merged opensearch-arrow-core into arrow-flight-rpc plugin Signed-off-by: Rishabh Maurya * moved libs:arrow-spi to server module Signed-off-by: Rishabh Maurya * Flight producer changes Signed-off-by: Rishabh Maurya * add hook in StreamManager plugin to get its instance on initialization Signed-off-by: Rishabh Maurya * Minor refactor Signed-off-by: Rishabh Maurya * Add more unit tests and address PR comment Signed-off-by: Rishabh Maurya * Address PR comments Signed-off-by: Rishabh Maurya * defer setting BufferAllocator in FlightStreamManager Signed-off-by: Rishabh Maurya * Address PR comments Signed-off-by: Rishabh Maurya * Refactor BaseFlightProducer and improved error handling Signed-off-by: Rishabh Maurya * Address PR comments Signed-off-by: Rishabh Maurya * refactor and improve test coverage Signed-off-by: Rishabh Maurya * throw UnsupportedOperationException for ProxyStreamProducer#getAction() Signed-off-by: Rishabh Maurya * refactor error handling Signed-off-by: Rishabh Maurya --------- Signed-off-by: Rishabh Maurya --- distribution/src/config/jvm.options | 1 + gradle/run.gradle | 8 + libs/arrow-spi/build.gradle | 20 - plugins/arrow-flight-rpc/build.gradle | 74 +- .../error_prone_annotations-2.31.0.jar.sha1 | 1 + .../error_prone_annotations-LICENSE.txt | 202 +++++ .../error_prone_annotations-NOTICE.txt | 0 .../licenses/netty-LICENSE.txt | 4 +- .../licenses/netty-NOTICE.txt | 206 +---- .../arrow/flight/ArrowFlightServerIT.java | 280 ++++++- .../FlightServerInfoAction.java | 2 +- .../api/{ => flightinfo}/NodeFlightInfo.java | 2 +- .../NodesFlightInfoAction.java | 2 +- .../NodesFlightInfoRequest.java | 2 +- .../NodesFlightInfoResponse.java | 2 +- .../TransportNodesFlightInfoAction.java | 2 +- .../api/{ => flightinfo}/package-info.java | 2 +- .../flight/bootstrap/FlightClientManager.java | 49 +- .../arrow/flight/bootstrap/FlightService.java | 11 +- .../flight/bootstrap/FlightStreamPlugin.java | 23 +- .../arrow/flight/impl/BaseFlightProducer.java | 259 +++++++ .../CustomCallbackBackpressureStrategy.java | 44 ++ .../flight/impl/FlightStreamManager.java | 212 +++++ .../arrow/flight/impl/FlightStreamReader.java | 61 ++ .../arrow/flight/impl/FlightStreamTicket.java | 111 +++ 
.../impl/FlightStreamTicketFactory.java | 60 ++ .../flight/impl/ProxyStreamProducer.java | 122 +++ .../arrow/flight/impl/package-info.java | 12 + .../plugin-metadata/plugin-security.policy | 18 +- .../arrow/flight/FlightStreamPluginTests.java | 10 +- .../FlightServerInfoActionTests.java | 2 +- .../{ => flightinfo}/NodeFlightInfoTests.java | 2 +- .../NodesFlightInfoRequestTests.java | 2 +- .../NodesFlightInfoResponseTests.java | 2 +- .../TransportNodesFlightInfoActionTests.java | 2 +- .../bootstrap/FlightClientManagerTests.java | 16 +- .../flight/bootstrap/FlightServiceTests.java | 2 +- .../flight/impl/BaseFlightProducerTests.java | 732 ++++++++++++++++++ .../flight/impl/FlightStreamManagerTests.java | 176 +++++ .../flight/impl/FlightStreamReaderTests.java | 86 ++ .../flight/impl/FlightStreamTicketTests.java | 111 +++ .../flight/impl/ProxyStreamProducerTests.java | 120 +++ server/build.gradle | 1 - .../opensearch/arrow/spi/StreamManager.java | 0 .../opensearch/arrow/spi/StreamProducer.java | 13 +- .../opensearch/arrow/spi/StreamReader.java | 0 .../opensearch/arrow/spi/StreamTicket.java | 0 .../arrow/spi/StreamTicketFactory.java | 0 .../opensearch/arrow/spi/package-info.java | 0 .../org/opensearch/common/cache/Cache.java | 24 + .../main/java/org/opensearch/node/Node.java | 22 + .../plugins/StreamManagerPlugin.java | 16 +- 52 files changed, 2821 insertions(+), 310 deletions(-) delete mode 100644 libs/arrow-spi/build.gradle create mode 100644 plugins/arrow-flight-rpc/licenses/error_prone_annotations-2.31.0.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/error_prone_annotations-LICENSE.txt create mode 100644 plugins/arrow-flight-rpc/licenses/error_prone_annotations-NOTICE.txt rename plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/{ => flightinfo}/FlightServerInfoAction.java (97%) rename plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/{ => flightinfo}/NodeFlightInfo.java (98%) rename plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/{ => flightinfo}/NodesFlightInfoAction.java (93%) rename plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/{ => flightinfo}/NodesFlightInfoRequest.java (97%) rename plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/{ => flightinfo}/NodesFlightInfoResponse.java (98%) rename plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/{ => flightinfo}/TransportNodesFlightInfoAction.java (98%) rename plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/{ => flightinfo}/package-info.java (83%) create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/BaseFlightProducer.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/CustomCallbackBackpressureStrategy.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamManager.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamReader.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamTicket.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamTicketFactory.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/ProxyStreamProducer.java create mode 100644 plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/package-info.java rename 
plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/{ => flightinfo}/FlightServerInfoActionTests.java (98%) rename plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/{ => flightinfo}/NodeFlightInfoTests.java (99%) rename plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/{ => flightinfo}/NodesFlightInfoRequestTests.java (96%) rename plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/{ => flightinfo}/NodesFlightInfoResponseTests.java (99%) rename plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/{ => flightinfo}/TransportNodesFlightInfoActionTests.java (99%) create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/BaseFlightProducerTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamManagerTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamReaderTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamTicketTests.java create mode 100644 plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/ProxyStreamProducerTests.java rename {libs/arrow-spi => server}/src/main/java/org/opensearch/arrow/spi/StreamManager.java (100%) rename {libs/arrow-spi => server}/src/main/java/org/opensearch/arrow/spi/StreamProducer.java (94%) rename {libs/arrow-spi => server}/src/main/java/org/opensearch/arrow/spi/StreamReader.java (100%) rename {libs/arrow-spi => server}/src/main/java/org/opensearch/arrow/spi/StreamTicket.java (100%) rename {libs/arrow-spi => server}/src/main/java/org/opensearch/arrow/spi/StreamTicketFactory.java (100%) rename {libs/arrow-spi => server}/src/main/java/org/opensearch/arrow/spi/package-info.java (100%) diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 54222d07634fc..e083f07edabc8 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -85,3 +85,4 @@ ${error.file} 23:-XX:CompileCommand=dontinline,java/lang/invoke/MethodHandle.asTypeUncached 21-:-javaagent:agent/opensearch-agent.jar +21-:--add-opens=java.base/java.nio=org.apache.arrow.memory.core,ALL-UNNAMED diff --git a/gradle/run.gradle b/gradle/run.gradle index 34651f1d94964..ac58d74acd6b0 100644 --- a/gradle/run.gradle +++ b/gradle/run.gradle @@ -43,9 +43,17 @@ testClusters { installedPlugins = Eval.me(installedPlugins) for (String p : installedPlugins) { plugin('plugins:'.concat(p)) + if (p.equals("arrow-flight-rpc")) { + // Add system properties for Netty configuration + systemProperty 'io.netty.allocator.numDirectArenas', '1' + systemProperty 'io.netty.noUnsafe', 'false' + systemProperty 'io.netty.tryUnsafe', 'true' + systemProperty 'io.netty.tryReflectionSetAccessible', 'true' + } } } } + } tasks.register("run", RunTask) { diff --git a/libs/arrow-spi/build.gradle b/libs/arrow-spi/build.gradle deleted file mode 100644 index 90a4c162e428b..0000000000000 --- a/libs/arrow-spi/build.gradle +++ /dev/null @@ -1,20 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -testingConventions.enabled = false - -dependencies { - api project(':libs:opensearch-core') -} - -tasks.named('forbiddenApisMain').configure { - replaceSignatureFiles 'jdk-signatures' -} diff --git a/plugins/arrow-flight-rpc/build.gradle b/plugins/arrow-flight-rpc/build.gradle index f3a166bc39ae7..1d05464d0ee87 100644 --- a/plugins/arrow-flight-rpc/build.gradle +++ b/plugins/arrow-flight-rpc/build.gradle @@ -12,25 +12,34 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description = 'Arrow flight based Stream implementation' + description = 'Arrow flight based transport and stream implementation. It also provides Arrow vector and memory dependencies as ' + + 'an extended-plugin at runtime; consumers should take a compile-time dependency on this project, not a runtime one.' classname = 'org.opensearch.arrow.flight.bootstrap.FlightStreamPlugin' } dependencies { - implementation project(':libs:opensearch-arrow-spi') - compileOnly 'org.checkerframework:checker-qual:3.44.0' + // all transitive dependencies exported to use arrow-vector and arrow-memory-core + api "org.apache.arrow:arrow-memory-netty:${versions.arrow}" + api "org.apache.arrow:arrow-memory-core:${versions.arrow}" + api "org.apache.arrow:arrow-memory-netty-buffer-patch:${versions.arrow}" + api "io.netty:netty-buffer:${versions.netty}" + api "io.netty:netty-common:${versions.netty}" + api "org.apache.arrow:arrow-vector:${versions.arrow}" + api "org.apache.arrow:arrow-format:${versions.arrow}" - implementation "org.apache.arrow:arrow-vector:${versions.arrow}" - implementation "org.apache.arrow:arrow-format:${versions.arrow}" + compileOnly 'org.checkerframework:checker-qual:3.44.0' + api "com.google.flatbuffers:flatbuffers-java:${versions.flatbuffers}" + api "org.slf4j:slf4j-api:${versions.slf4j}" + api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + api "commons-codec:commons-codec:${versions.commonscodec}" + + // arrow flight dependencies.
implementation "org.apache.arrow:flight-core:${versions.arrow}" - implementation "org.apache.arrow:arrow-memory-core:${versions.arrow}" - - runtimeOnly "org.apache.arrow:arrow-memory-netty:${versions.arrow}" - runtimeOnly "org.apache.arrow:arrow-memory-netty-buffer-patch:${versions.arrow}" - - implementation "io.netty:netty-buffer:${versions.netty}" - implementation "io.netty:netty-common:${versions.netty}" + // since netty-common will be added by opensearch-arrow-core at runtime, so declaring them as compileOnly + // compileOnly "io.netty:netty-common:${versions.netty}" implementation "io.netty:netty-codec:${versions.netty}" implementation "io.netty:netty-codec-http:${versions.netty}" implementation "io.netty:netty-codec-http2:${versions.netty}" @@ -41,28 +50,21 @@ dependencies { implementation "io.netty:netty-transport-classes-epoll:${versions.netty}" implementation "io.netty:netty-tcnative-classes:2.0.66.Final" - implementation "org.slf4j:slf4j-api:${versions.slf4j}" - runtimeOnly "com.google.flatbuffers:flatbuffers-java:${versions.flatbuffers}" - runtimeOnly "commons-codec:commons-codec:${versions.commonscodec}" - implementation "io.grpc:grpc-api:${versions.grpc}" runtimeOnly "io.grpc:grpc-core:${versions.grpc}" implementation "io.grpc:grpc-stub:${versions.grpc}" implementation "io.grpc:grpc-netty:${versions.grpc}" + implementation "com.google.errorprone:error_prone_annotations:2.31.0" runtimeOnly group: 'com.google.code.findbugs', name: 'jsr305', version: '3.0.2' - compileOnly 'org.immutables:value:2.10.1' annotationProcessor 'org.immutables:value:2.10.1' runtimeOnly 'io.perfmark:perfmark-api:0.27.0' runtimeOnly 'org.apache.parquet:parquet-arrow:1.13.1' runtimeOnly "io.grpc:grpc-protobuf-lite:${versions.grpc}" runtimeOnly "io.grpc:grpc-protobuf:${versions.grpc}" - implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" - implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" runtimeOnly "com.google.guava:failureaccess:1.0.1" - compileOnly "com.google.errorprone:error_prone_annotations:2.31.0" runtimeOnly('com.google.guava:guava:33.3.1-jre') { attributes { attribute(Attribute.of('org.gradle.jvm.environment', String), 'standard-jvm') @@ -88,6 +90,7 @@ internalClusterTest { systemProperty 'io.netty.noUnsafe', 'false' systemProperty 'io.netty.tryUnsafe', 'true' systemProperty 'io.netty.tryReflectionSetAccessible', 'true' + jvmArgs += ["--add-opens", "java.base/java.nio=org.apache.arrow.memory.core,ALL-UNNAMED"] } spotless { @@ -120,6 +123,19 @@ tasks.named('forbiddenApisMain').configure { tasks.named('thirdPartyAudit').configure { ignoreMissingClasses( + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration', 'com.google.gson.stream.JsonReader', 'com.google.gson.stream.JsonToken', 'org.apache.parquet.schema.GroupType', @@ -158,18 +174,6 @@ tasks.named('thirdPartyAudit').configure { 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', // classes are missing - // from io.netty.logging.CommonsLoggerFactory (netty) - 'org.apache.commons.logging.Log', - 'org.apache.commons.logging.LogFactory', - - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 
'org.slf4j.impl.StaticMarkerBinder', - - // from Log4j (deliberate, Netty will fallback to Log4j 2) - 'org.apache.log4j.Level', - 'org.apache.log4j.Logger', - // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', @@ -224,9 +228,6 @@ tasks.named('thirdPartyAudit').configure { 'org.conscrypt.Conscrypt', 'org.conscrypt.HandshakeListener', - 'reactor.blockhound.BlockHound$Builder', - 'reactor.blockhound.integration.BlockHoundIntegration', - 'com.google.protobuf.util.Timestamps' ) ignoreViolations( @@ -288,7 +289,7 @@ tasks.named('thirdPartyAudit').configure { 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', - 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', @@ -296,6 +297,5 @@ tasks.named('thirdPartyAudit').configure { 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', 'org.apache.arrow.memory.util.MemoryUtil', 'org.apache.arrow.memory.util.MemoryUtil$1' - ) } diff --git a/plugins/arrow-flight-rpc/licenses/error_prone_annotations-2.31.0.jar.sha1 b/plugins/arrow-flight-rpc/licenses/error_prone_annotations-2.31.0.jar.sha1 new file mode 100644 index 0000000000000..4872d644799f5 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/error_prone_annotations-2.31.0.jar.sha1 @@ -0,0 +1 @@ +c3ba307b915d6d506e98ffbb49e6d2d12edad65b \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/error_prone_annotations-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/error_prone_annotations-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/error_prone_annotations-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/arrow-flight-rpc/licenses/error_prone_annotations-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/error_prone_annotations-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/arrow-flight-rpc/licenses/netty-LICENSE.txt b/plugins/arrow-flight-rpc/licenses/netty-LICENSE.txt index 62589edd12a37..d645695673349 100644 --- a/plugins/arrow-flight-rpc/licenses/netty-LICENSE.txt +++ b/plugins/arrow-flight-rpc/licenses/netty-LICENSE.txt @@ -1,7 +1,7 @@ Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -193,7 +193,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/plugins/arrow-flight-rpc/licenses/netty-NOTICE.txt b/plugins/arrow-flight-rpc/licenses/netty-NOTICE.txt index 971865b7c1c23..5bbf91a14de23 100644 --- a/plugins/arrow-flight-rpc/licenses/netty-NOTICE.txt +++ b/plugins/arrow-flight-rpc/licenses/netty-NOTICE.txt @@ -4,15 +4,15 @@ Please visit the Netty web site for more information: - * https://netty.io/ + * http://netty.io/ -Copyright 2014 The Netty Project +Copyright 2011 The Netty Project The Netty Project licenses this file to you under the Apache License, version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: - https://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -42,112 +42,29 @@ Base64 Encoder and Decoder, which can be obtained at: * HOMEPAGE: * http://iharder.sourceforge.net/current/java/base64/ -This product contains a modified portion of 'Webbit', an event based -WebSocket and HTTP server, which can be obtained at: +This product contains a modified version of 'JZlib', a re-implementation of +zlib in pure Java, which can be obtained at: * LICENSE: - * license/LICENSE.webbit.txt (BSD License) - * HOMEPAGE: - * https://github.com/joewalnes/webbit - -This product contains a modified portion of 'SLF4J', a simple logging -facade for Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.slf4j.txt (MIT License) - * HOMEPAGE: - * https://www.slf4j.org/ - -This product contains a modified portion of 'Apache Harmony', an open source -Java SE, which can be obtained at: - - * NOTICE: - * license/NOTICE.harmony.txt - * LICENSE: - * license/LICENSE.harmony.txt (Apache License 2.0) - * HOMEPAGE: - * https://archive.apache.org/dist/harmony/ - -This product contains a modified portion of 'jbzip2', a Java bzip2 compression -and decompression library written by Matthew J. Francis. It can be obtained at: - - * LICENSE: - * license/LICENSE.jbzip2.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jbzip2/ - -This product contains a modified portion of 'libdivsufsort', a C API library to construct -the suffix array and the Burrows-Wheeler transformed string for any input string of -a constant-size alphabet written by Yuta Mori. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.libdivsufsort.txt (MIT License) - * HOMEPAGE: - * https://github.com/y-256/libdivsufsort - -This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, - which can be obtained at: - - * LICENSE: - * license/LICENSE.jctools.txt (ASL2 License) - * HOMEPAGE: - * https://github.com/JCTools/JCTools - -This product optionally depends on 'JZlib', a re-implementation of zlib in -pure Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.jzlib.txt (BSD style License) + * license/LICENSE.jzlib.txt (BSD Style License) * HOMEPAGE: * http://www.jcraft.com/jzlib/ -This product optionally depends on 'Compress-LZF', a Java library for encoding and -decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: - - * LICENSE: - * license/LICENSE.compress-lzf.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/ning/compress - -This product optionally depends on 'lz4', a LZ4 Java compression -and decompression library written by Adrien Grand. It can be obtained at: - - * LICENSE: - * license/LICENSE.lz4.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jpountz/lz4-java - -This product optionally depends on 'lzma-java', a LZMA Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.lzma-java.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jponge/lzma-java - -This product optionally depends on 'zstd-jni', a zstd-jni Java compression -and decompression library, which can be obtained at: +This product contains a modified version of 'Webbit', a Java event based +WebSocket and HTTP server: * LICENSE: - * license/LICENSE.zstd-jni.txt (BSD) - * HOMEPAGE: - * https://github.com/luben/zstd-jni - -This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression -and decompression library written by William Kinney. It can be obtained at: - - * LICENSE: - * license/LICENSE.jfastlz.txt (MIT License) + * license/LICENSE.webbit.txt (BSD License) * HOMEPAGE: - * https://code.google.com/p/jfastlz/ + * https://github.com/joewalnes/webbit -This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data +This product optionally depends on 'Protocol Buffers', Google's data interchange format, which can be obtained at: * LICENSE: * license/LICENSE.protobuf.txt (New BSD License) * HOMEPAGE: - * https://github.com/google/protobuf + * http://code.google.com/p/protobuf/ This product optionally depends on 'Bouncy Castle Crypto APIs' to generate a temporary self-signed X.509 certificate when the JVM does not provide the @@ -156,31 +73,15 @@ equivalent functionality. 
It can be obtained at: * LICENSE: * license/LICENSE.bouncycastle.txt (MIT License) * HOMEPAGE: - * https://www.bouncycastle.org/ - -This product optionally depends on 'Snappy', a compression library produced -by Google Inc, which can be obtained at: - - * LICENSE: - * license/LICENSE.snappy.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/snappy + * http://www.bouncycastle.org/ -This product optionally depends on 'JBoss Marshalling', an alternative Java -serialization API, which can be obtained at: +This product optionally depends on 'SLF4J', a simple logging facade for Java, +which can be obtained at: * LICENSE: - * license/LICENSE.jboss-marshalling.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jboss-remoting/jboss-marshalling - -This product optionally depends on 'Caliper', Google's micro- -benchmarking framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.caliper.txt (Apache License 2.0) + * license/LICENSE.slf4j.txt (MIT License) * HOMEPAGE: - * https://github.com/google/caliper + * http://www.slf4j.org/ This product optionally depends on 'Apache Commons Logging', a logging framework, which can be obtained at: @@ -188,77 +89,28 @@ framework, which can be obtained at: * LICENSE: * license/LICENSE.commons-logging.txt (Apache License 2.0) * HOMEPAGE: - * https://commons.apache.org/logging/ + * http://commons.apache.org/logging/ -This product optionally depends on 'Apache Log4J', a logging framework, which -can be obtained at: +This product optionally depends on 'Apache Log4J', a logging framework, +which can be obtained at: * LICENSE: * license/LICENSE.log4j.txt (Apache License 2.0) * HOMEPAGE: - * https://logging.apache.org/log4j/ - -This product optionally depends on 'Aalto XML', an ultra-high performance -non-blocking XML processor, which can be obtained at: - - * LICENSE: - * license/LICENSE.aalto-xml.txt (Apache License 2.0) - * HOMEPAGE: - * https://wiki.fasterxml.com/AaltoHome + * http://logging.apache.org/log4j/ -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: +This product optionally depends on 'JBoss Logging', a logging framework, +which can be obtained at: * LICENSE: - * license/LICENSE.hpack.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/twitter/hpack - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: - - * LICENSE: - * license/LICENSE.hyper-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/python-hyper/hpack/ - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: - - * LICENSE: - * license/LICENSE.nghttp2-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/nghttp2/nghttp2/ - -This product contains a modified portion of 'Apache Commons Lang', a Java library -provides utilities for the java.lang API, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-lang.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/proper/commons-lang/ - - -This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. 
- - * LICENSE: - * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/takari/maven-wrapper - -This product contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. -This private header is also used by Apple's open source - mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). - - * LICENSE: - * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0) + * license/LICENSE.jboss-logging.txt (GNU LGPL 2.1) * HOMEPAGE: - * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h + * http://anonsvn.jboss.org/repos/common/common-logging-spi/ -This product optionally depends on 'Brotli4j', Brotli compression and -decompression for Java., which can be obtained at: +This product optionally depends on 'Apache Felix', an open source OSGi +framework implementation, which can be obtained at: * LICENSE: - * license/LICENSE.brotli4j.txt (Apache License 2.0) + * license/LICENSE.felix.txt (Apache License 2.0) * HOMEPAGE: - * https://github.com/hyperxpro/Brotli4j + * http://felix.apache.org/ diff --git a/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java b/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java index 54b47329dab7f..46f72bea3e4c7 100644 --- a/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java +++ b/plugins/arrow-flight-rpc/src/internalClusterTest/java/org/opensearch/arrow/flight/ArrowFlightServerIT.java @@ -10,24 +10,37 @@ import org.apache.arrow.flight.CallOptions; import org.apache.arrow.flight.FlightClient; +import org.apache.arrow.flight.FlightDescriptor; +import org.apache.arrow.flight.FlightInfo; +import org.apache.arrow.flight.FlightRuntimeException; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.vector.FieldVector; +import org.apache.arrow.vector.IntVector; +import org.apache.arrow.vector.VectorSchemaRoot; import org.opensearch.arrow.flight.bootstrap.FlightClientManager; import org.opensearch.arrow.flight.bootstrap.FlightService; import org.opensearch.arrow.flight.bootstrap.FlightStreamPlugin; +import org.opensearch.arrow.spi.StreamManager; +import org.opensearch.arrow.spi.StreamProducer; +import org.opensearch.arrow.spi.StreamReader; +import org.opensearch.arrow.spi.StreamTicket; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 5) public class ArrowFlightServerIT extends OpenSearchIntegTestCase { - private FlightClientManager flightClientManager; - @Override protected Collection> nodePlugins() { return Collections.singleton(FlightStreamPlugin.class); @@ -37,18 +50,269 @@ protected Collection> nodePlugins() { public void setUp() throws Exception { super.setUp(); ensureGreen(); - Thread.sleep(1000); - FlightService flightService = internalCluster().getInstance(FlightService.class); - flightClientManager = flightService.getFlightClientManager(); + for 
(DiscoveryNode node : getClusterState().nodes()) { + FlightService flightService = internalCluster().getInstance(FlightService.class, node.getName()); + FlightClientManager flightClientManager = flightService.getFlightClientManager(); + assertBusy(() -> { + assertTrue( + "Flight client should be created successfully before running tests", + flightClientManager.getFlightClient(node.getId()).isPresent() + ); + }, 3, TimeUnit.SECONDS); + } } @LockFeatureFlag(ARROW_STREAMS) public void testArrowFlightEndpoint() throws Exception { for (DiscoveryNode node : getClusterState().nodes()) { - try (FlightClient flightClient = flightClientManager.getFlightClient(node.getId()).get()) { - assertNotNull(flightClient); - flightClient.handshake(CallOptions.timeout(5000L, TimeUnit.MILLISECONDS)); + FlightService flightService = internalCluster().getInstance(FlightService.class, node.getName()); + FlightClientManager flightClientManager = flightService.getFlightClientManager(); + FlightClient flightClient = flightClientManager.getFlightClient(node.getId()).get(); + assertNotNull(flightClient); + flightClient.handshake(CallOptions.timeout(5000L, TimeUnit.MILLISECONDS)); + flightClient.handshake(CallOptions.timeout(5000L, TimeUnit.MILLISECONDS)); + } + } + + @LockFeatureFlag(ARROW_STREAMS) + public void testFlightStreamReader() throws Exception { + for (DiscoveryNode node : getClusterState().nodes()) { + StreamManager streamManagerRandomNode = getStreamManagerRandomNode(); + StreamTicket ticket = streamManagerRandomNode.registerStream(getStreamProducer(), null); + StreamManager streamManagerCurrentNode = getStreamManager(node.getName()); + // reader should be accessible from any node in the cluster due to the use of ProxyStreamProducer + try (StreamReader<VectorSchemaRoot> reader = streamManagerCurrentNode.getStreamReader(ticket)) { + int totalBatches = 0; + assertNotNull(reader.getRoot().getVector("docID")); + while (reader.next()) { + IntVector docIDVector = (IntVector) reader.getRoot().getVector("docID"); + assertEquals(10, docIDVector.getValueCount()); + for (int i = 0; i < 10; i++) { + assertEquals(docIDVector.toString(), i + (totalBatches * 10L), docIDVector.get(i)); + } + totalBatches++; + } + assertEquals(10, totalBatches); + } + } + } + + @LockFeatureFlag(ARROW_STREAMS) + public void testEarlyCancel() throws Exception { + DiscoveryNode previousNode = null; + for (DiscoveryNode node : getClusterState().nodes()) { + if (previousNode == null) { + previousNode = node; + continue; + } + StreamManager streamManagerServer = getStreamManager(node.getName()); + TestStreamProducer streamProducer = getStreamProducer(); + StreamTicket ticket = streamManagerServer.registerStream(streamProducer, null); + StreamManager streamManagerClient = getStreamManager(previousNode.getName()); + + CountDownLatch readerComplete = new CountDownLatch(1); + AtomicReference<Exception> readerException = new AtomicReference<>(); + AtomicReference<StreamReader<VectorSchemaRoot>> readerRef = new AtomicReference<>(); + + // Start reader thread + Thread readerThread = new Thread(() -> { + try (StreamReader<VectorSchemaRoot> reader = streamManagerClient.getStreamReader(ticket)) { + readerRef.set(reader); + assertNotNull(reader.getRoot()); + IntVector docIDVector = (IntVector) reader.getRoot().getVector("docID"); + assertNotNull(docIDVector); + + // Read first batch + reader.next(); + assertEquals(10, docIDVector.getValueCount()); + for (int i = 0; i < 10; i++) { + assertEquals(docIDVector.toString(), i, docIDVector.get(i)); + } + } catch (Exception e) { + readerException.set(e); + } finally { +
readerComplete.countDown(); + } + }, "flight-reader-thread"); + + readerThread.start(); + assertTrue("Reader thread did not complete in time", readerComplete.await(5, TimeUnit.SECONDS)); + + if (readerException.get() != null) { + throw readerException.get(); + } + + StreamReader reader = readerRef.get(); + + try { + reader.next(); + fail("Expected FlightRuntimeException"); + } catch (FlightRuntimeException e) { + assertEquals("CANCELLED", e.status().code().name()); + assertEquals("Stream closed before end", e.getMessage()); + reader.close(); + } + + // Wait for close to complete + // Due to https://github.com/grpc/grpc-java/issues/5882, there is a logic in FlightStream.java + // where it exhausts the stream on the server side before it is actually cancelled. + assertTrue( + "Timeout waiting for stream cancellation on server [" + node.getName() + "]", + streamProducer.waitForClose(2, TimeUnit.SECONDS) + ); + previousNode = node; + } + } + + @LockFeatureFlag(ARROW_STREAMS) + public void testFlightStreamServerError() throws Exception { + DiscoveryNode previousNode = null; + for (DiscoveryNode node : getClusterState().nodes()) { + if (previousNode == null) { + previousNode = node; + continue; + } + StreamManager streamManagerServer = getStreamManager(node.getName()); + TestStreamProducer streamProducer = getStreamProducer(); + streamProducer.setProduceError(true); + StreamTicket ticket = streamManagerServer.registerStream(streamProducer, null); + StreamManager streamManagerClient = getStreamManager(previousNode.getName()); + try (StreamReader reader = streamManagerClient.getStreamReader(ticket)) { + int totalBatches = 0; + assertNotNull(reader.getRoot().getVector("docID")); + try { + while (reader.next()) { + IntVector docIDVector = (IntVector) reader.getRoot().getVector("docID"); + assertEquals(10, docIDVector.getValueCount()); + totalBatches++; + } + fail("Expected FlightRuntimeException"); + } catch (FlightRuntimeException e) { + assertEquals("INTERNAL", e.status().code().name()); + assertEquals("Unexpected server error", e.getMessage()); + } + assertEquals(1, totalBatches); + } + previousNode = node; + } + } + + @LockFeatureFlag(ARROW_STREAMS) + public void testFlightGetInfo() throws Exception { + StreamTicket ticket = null; + for (DiscoveryNode node : getClusterState().nodes()) { + FlightService flightService = internalCluster().getInstance(FlightService.class, node.getName()); + StreamManager streamManager = flightService.getStreamManager(); + if (ticket == null) { + ticket = streamManager.registerStream(getStreamProducer(), null); + } + FlightClientManager flightClientManager = flightService.getFlightClientManager(); + FlightClient flightClient = flightClientManager.getFlightClient(node.getId()).get(); + assertNotNull(flightClient); + FlightDescriptor flightDescriptor = FlightDescriptor.command(ticket.toBytes()); + FlightInfo flightInfo = flightClient.getInfo(flightDescriptor, CallOptions.timeout(5000L, TimeUnit.MILLISECONDS)); + assertNotNull(flightInfo); + assertEquals(100, flightInfo.getRecords()); + } + } + + private StreamManager getStreamManager(String nodeName) { + FlightService flightService = internalCluster().getInstance(FlightService.class, nodeName); + return flightService.getStreamManager(); + } + + private StreamManager getStreamManagerRandomNode() { + FlightService flightService = internalCluster().getInstance(FlightService.class); + return flightService.getStreamManager(); + } + + private TestStreamProducer getStreamProducer() { + return new TestStreamProducer(); + } + + 
private static class TestStreamProducer implements StreamProducer { + volatile boolean isClosed = false; + private final CountDownLatch closeLatch = new CountDownLatch(1); + TimeValue deadline = TimeValue.timeValueSeconds(5); + private boolean produceError = false; + + public void setProduceError(boolean produceError) { + this.produceError = produceError; + } + + TestStreamProducer() {} + + VectorSchemaRoot root; + + @Override + public VectorSchemaRoot createRoot(BufferAllocator allocator) { + IntVector docIDVector = new IntVector("docID", allocator); + FieldVector[] vectors = new FieldVector[] { docIDVector }; + root = new VectorSchemaRoot(Arrays.asList(vectors)); + return root; + } + + @Override + public BatchedJob createJob(BufferAllocator allocator) { + return new BatchedJob<>() { + @Override + public void run(VectorSchemaRoot root, FlushSignal flushSignal) { + IntVector docIDVector = (IntVector) root.getVector("docID"); + root.setRowCount(10); + for (int i = 0; i < 100; i++) { + docIDVector.setSafe(i % 10, i); + if ((i + 1) % 10 == 0) { + flushSignal.awaitConsumption(TimeValue.timeValueMillis(1000)); + docIDVector.clear(); + root.setRowCount(10); + if (produceError) { + throw new RuntimeException("Server error while producing batch"); + } + } + } + } + + @Override + public void onCancel() { + if (!isClosed && root != null) { + root.close(); + } + isClosed = true; + } + + @Override + public boolean isCancelled() { + return isClosed; + } + }; + } + + @Override + public TimeValue getJobDeadline() { + return deadline; + } + + @Override + public int estimatedRowCount() { + return 100; + } + + @Override + public String getAction() { + return ""; + } + + @Override + public void close() { + if (!isClosed && root != null) { + root.close(); } + closeLatch.countDown(); + isClosed = true; + } + + public boolean waitForClose(long timeout, TimeUnit unit) throws InterruptedException { + return closeLatch.await(timeout, unit); } } } diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/FlightServerInfoAction.java similarity index 97% rename from plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java rename to plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/FlightServerInfoAction.java index 529bee72c708d..c988090081266 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/FlightServerInfoAction.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/FlightServerInfoAction.java @@ -5,7 +5,7 @@ * this file be licensed under the Apache-2.0 license or a * compatible open source license. 
*/ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodeFlightInfo.java similarity index 98% rename from plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java rename to plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodeFlightInfo.java index e804b0c518523..23163bfac8c2e 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodeFlightInfo.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodeFlightInfo.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.node.DiscoveryNode; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoAction.java similarity index 93% rename from plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java rename to plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoAction.java index 3148c58a1509d..3c3a9965459cb 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoAction.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoAction.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.action.ActionType; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoRequest.java similarity index 97% rename from plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java rename to plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoRequest.java index 1b707f461819c..43bf38a096b57 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequest.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoRequest.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoResponse.java similarity index 98% rename from plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java rename to plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoResponse.java index 721cd631924bd..805aa188ce37a 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponse.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoResponse.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/TransportNodesFlightInfoAction.java similarity index 98% rename from plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java rename to plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/TransportNodesFlightInfoAction.java index d4722e20d1f84..51f4cc05b8001 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoAction.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/TransportNodesFlightInfoAction.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/package-info.java similarity index 83% rename from plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java rename to plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/package-info.java index d89ec87f9a51e..19dde32f32e8f 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/package-info.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/api/flightinfo/package-info.java @@ -9,4 +9,4 @@ /** * Action to retrieve flight info from nodes */ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java index a81033f580a03..c81f4d3c270e7 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightClientManager.java @@ -15,10 +15,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; -import org.opensearch.arrow.flight.api.NodeFlightInfo; -import org.opensearch.arrow.flight.api.NodesFlightInfoAction; -import org.opensearch.arrow.flight.api.NodesFlightInfoRequest; -import org.opensearch.arrow.flight.api.NodesFlightInfoResponse; +import org.opensearch.arrow.flight.api.flightinfo.NodeFlightInfo; +import org.opensearch.arrow.flight.api.flightinfo.NodesFlightInfoAction; +import org.opensearch.arrow.flight.api.flightinfo.NodesFlightInfoRequest; +import org.opensearch.arrow.flight.api.flightinfo.NodesFlightInfoResponse; import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; @@ -59,8 +59,9 @@ public class FlightClientManager implements ClusterStateListener, AutoCloseable static final int LOCATION_TIMEOUT_MS = 1000; private final ExecutorService grpcExecutor; private final ClientConfiguration clientConfig; - private final Map flightClients = new ConcurrentHashMap<>(); + private final Map flightClients = new ConcurrentHashMap<>(); private final Client client; + private volatile boolean closed = false; /** * Creates a new FlightClientManager instance. @@ -99,7 +100,19 @@ public FlightClientManager( * @return An OpenSearchFlightClient instance for the specified node */ public Optional getFlightClient(String nodeId) { - return Optional.ofNullable(flightClients.get(nodeId)); + ClientHolder clientHolder = flightClients.get(nodeId); + return clientHolder == null ? Optional.empty() : Optional.of(clientHolder.flightClient); + } + + /** + * Returns the location of a Flight client for a given node ID. 
+ * + * @param nodeId The ID of the node for which to retrieve the location + * @return The Location of the Flight client for the specified node + */ + public Optional getFlightClientLocation(String nodeId) { + ClientHolder clientHolder = flightClients.get(nodeId); + return clientHolder == null ? Optional.empty() : Optional.of(clientHolder.location); } /** @@ -128,13 +141,15 @@ private void buildClientAndAddToPool(Location location, DiscoveryNode node) { ); return; } - flightClients.computeIfAbsent(node.getId(), key -> buildClient(location)); + if (closed) { + return; + } + flightClients.computeIfAbsent(node.getId(), nodeId -> new ClientHolder(location, buildClient(location))); } private void requestNodeLocation(String nodeId, CompletableFuture future) { NodesFlightInfoRequest request = new NodesFlightInfoRequest(nodeId); try { - client.execute(NodesFlightInfoAction.INSTANCE, request, new ActionListener<>() { @Override public void onResponse(NodesFlightInfoResponse response) { @@ -184,13 +199,21 @@ private DiscoveryNode getNodeFromClusterState(String nodeId) { */ @Override public void close() throws Exception { - for (FlightClient flightClient : flightClients.values()) { - flightClient.close(); + if (closed) { + return; + } + closed = true; + for (ClientHolder clientHolder : flightClients.values()) { + clientHolder.flightClient.close(); } flightClients.clear(); grpcExecutor.shutdown(); - grpcExecutor.awaitTermination(5, TimeUnit.SECONDS); - clientConfig.clusterService.removeListener(this); + if (grpcExecutor.awaitTermination(5, TimeUnit.SECONDS) == false) { + grpcExecutor.shutdownNow(); + } + } + + private record ClientHolder(Location location, FlightClient flightClient) { } /** @@ -229,7 +252,7 @@ private Set getCurrentClusterNodes() { } @VisibleForTesting - Map getFlightClients() { + Map getFlightClients() { return flightClients; } diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java index fdcbbf43d75bf..19671955f385f 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightService.java @@ -8,7 +8,6 @@ package org.opensearch.arrow.flight.bootstrap; -import org.apache.arrow.flight.NoOpFlightProducer; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.memory.RootAllocator; import org.apache.arrow.util.AutoCloseables; @@ -17,6 +16,8 @@ import org.apache.logging.log4j.Logger; import org.opensearch.arrow.flight.bootstrap.tls.DefaultSslContextProvider; import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; +import org.opensearch.arrow.flight.impl.BaseFlightProducer; +import org.opensearch.arrow.flight.impl.FlightStreamManager; import org.opensearch.arrow.spi.StreamManager; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.network.NetworkService; @@ -39,7 +40,7 @@ public class FlightService extends NetworkPlugin.AuxTransport { private static final Logger logger = LogManager.getLogger(FlightService.class); private final ServerComponents serverComponents; - private StreamManager streamManager; + private FlightStreamManager streamManager; private Client client; private FlightClientManager clientManager; private SecureTransportSettingsProvider secureTransportSettingsProvider; @@ -58,6 +59,7 @@ public FlightService(Settings settings) { throw 
new RuntimeException("Failed to initialize Arrow Flight server", e); } this.serverComponents = new ServerComponents(settings); + this.streamManager = new FlightStreamManager(); } void setClusterService(ClusterService clusterService) { @@ -104,7 +106,7 @@ protected void doStart() { client ); initializeStreamManager(clientManager); - serverComponents.setFlightProducer(new NoOpFlightProducer()); + serverComponents.setFlightProducer(new BaseFlightProducer(clientManager, streamManager, allocator)); serverComponents.start(); } catch (Exception e) { @@ -166,6 +168,7 @@ protected void doClose() { } private void initializeStreamManager(FlightClientManager clientManager) { - streamManager = null; + streamManager.setAllocatorSupplier(() -> allocator); + streamManager.setClientManager(clientManager); } } diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java index bb7edf491cf02..e2e7ef289eaf6 100644 --- a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/bootstrap/FlightStreamPlugin.java @@ -8,9 +8,9 @@ package org.opensearch.arrow.flight.bootstrap; -import org.opensearch.arrow.flight.api.FlightServerInfoAction; -import org.opensearch.arrow.flight.api.NodesFlightInfoAction; -import org.opensearch.arrow.flight.api.TransportNodesFlightInfoAction; +import org.opensearch.arrow.flight.api.flightinfo.FlightServerInfoAction; +import org.opensearch.arrow.flight.api.flightinfo.NodesFlightInfoAction; +import org.opensearch.arrow.flight.api.flightinfo.TransportNodesFlightInfoAction; import org.opensearch.arrow.spi.StreamManager; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; @@ -31,6 +31,7 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.ClusterPlugin; +import org.opensearch.plugins.ExtensiblePlugin; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.SecureTransportSettingsProvider; @@ -52,12 +53,19 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.Supplier; /** * FlightStreamPlugin class extends BaseFlightStreamPlugin and provides implementation for FlightStream plugin. */ -public class FlightStreamPlugin extends Plugin implements StreamManagerPlugin, NetworkPlugin, ActionPlugin, ClusterPlugin { +public class FlightStreamPlugin extends Plugin + implements + StreamManagerPlugin, + NetworkPlugin, + ActionPlugin, + ClusterPlugin, + ExtensiblePlugin { private final FlightService flightService; private final boolean isArrowStreamsEnabled; @@ -221,11 +229,8 @@ public void onNodeStarted(DiscoveryNode localNode) { * Gets the StreamManager instance for managing flight streams. */ @Override - public Supplier getStreamManager() { - if (!isArrowStreamsEnabled) { - return null; - } - return flightService::getStreamManager; + public Optional getStreamManager() { + return isArrowStreamsEnabled ? 
Optional.ofNullable(flightService.getStreamManager()) : Optional.empty(); } /** diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/BaseFlightProducer.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/BaseFlightProducer.java new file mode 100644 index 0000000000000..08f20d5448511 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/BaseFlightProducer.java @@ -0,0 +1,259 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.impl; + +import org.apache.arrow.flight.BackpressureStrategy; +import org.apache.arrow.flight.CallStatus; +import org.apache.arrow.flight.FlightClient; +import org.apache.arrow.flight.FlightDescriptor; +import org.apache.arrow.flight.FlightEndpoint; +import org.apache.arrow.flight.FlightInfo; +import org.apache.arrow.flight.FlightRuntimeException; +import org.apache.arrow.flight.FlightStream; +import org.apache.arrow.flight.Location; +import org.apache.arrow.flight.NoOpFlightProducer; +import org.apache.arrow.flight.Ticket; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.arrow.flight.bootstrap.FlightClientManager; +import org.opensearch.arrow.spi.StreamProducer; +import org.opensearch.arrow.spi.StreamTicket; + +import java.io.IOException; +import java.util.Collections; +import java.util.Optional; + +/** + * BaseFlightProducer extends NoOpFlightProducer to provide stream management functionality + * for Arrow Flight in OpenSearch. This class handles data streaming based on tickets, + * manages backpressure, and coordinates between stream providers and server stream listeners. + * It runs on the gRPC transport thread. + *

+ * Error handling strategy: + * 1. Add all errors to listener. + * 2. All FlightRuntimeException which are not INTERNAL should not be logged. + * 3. All FlightRuntimeException which are INTERNAL should be logged with error or warn (depending on severity). + */ +public class BaseFlightProducer extends NoOpFlightProducer { + private static final Logger logger = LogManager.getLogger(BaseFlightProducer.class); + private final FlightClientManager flightClientManager; + private final FlightStreamManager streamManager; + private final BufferAllocator allocator; + + /** + * Constructs a new BaseFlightProducer. + * + * @param flightClientManager The manager for handling client connections + * @param streamManager The manager for stream operations + * @param allocator The buffer allocator for Arrow memory management + */ + public BaseFlightProducer(FlightClientManager flightClientManager, FlightStreamManager streamManager, BufferAllocator allocator) { + this.flightClientManager = flightClientManager; + this.streamManager = streamManager; + this.allocator = allocator; + } + + /** + * Handles data streaming for a given Arrow Flight Ticket. This method runs on the gRPC transport thread + * and manages the entire streaming process, including backpressure and error handling. + * @param context The call context (unused in this implementation) + * @param ticket The Arrow Flight Ticket containing stream information + * @param listener The server stream listener for data flow + */ + @Override + public void getStream(CallContext context, Ticket ticket, ServerStreamListener listener) { + try { + StreamTicket streamTicket = parseTicket(ticket); + FlightStreamManager.StreamProducerHolder producerHolder = acquireStreamProducer(streamTicket, ticket).orElseThrow(() -> { + FlightRuntimeException ex = CallStatus.NOT_FOUND.withDescription("Stream not found").toRuntimeException(); + listener.error(ex); + return ex; + }); + processStreamWithProducer(context, producerHolder, listener); + } catch (FlightRuntimeException ex) { + listener.error(ex); + throw ex; + } catch (Exception ex) { + logger.error("Unexpected error during stream processing", ex); + FlightRuntimeException fre = CallStatus.INTERNAL.withCause(ex).withDescription("Unexpected server error").toRuntimeException(); + listener.error(fre); + throw fre; + } + } + + /** + * Retrieves FlightInfo for a given descriptor, handling both local and remote cases. + * The descriptor's command is expected to contain a serialized StreamTicket. + * + * @param context The call context + * @param descriptor The flight descriptor containing stream information + * @return FlightInfo for the requested stream + * @throws RuntimeException if the requested info cannot be retrieved + */ + @Override + public FlightInfo getFlightInfo(CallContext context, FlightDescriptor descriptor) { + StreamTicket streamTicket = parseDescriptor(descriptor); + return streamTicket.getNodeId().equals(flightClientManager.getLocalNodeId()) + ? 
getLocalFlightInfo(streamTicket, descriptor) + : getRemoteFlightInfo(streamTicket, descriptor); + } + + private StreamTicket parseTicket(Ticket ticket) { + try { + return streamManager.getStreamTicketFactory().fromBytes(ticket.getBytes()); + } catch (Exception e) { + logger.debug("Failed to parse Arrow Flight Ticket", e); + throw CallStatus.INVALID_ARGUMENT.withCause(e).withDescription("Invalid ticket format: " + e.getMessage()).toRuntimeException(); + } + } + + private StreamTicket parseDescriptor(FlightDescriptor descriptor) { + try { + return streamManager.getStreamTicketFactory().fromBytes(descriptor.getCommand()); + } catch (Exception e) { + logger.debug("Failed to parse flight descriptor command", e); + throw CallStatus.INVALID_ARGUMENT.withCause(e) + .withDescription("Invalid descriptor format: " + e.getMessage()) + .toRuntimeException(); + } + } + + private Optional acquireStreamProducer(StreamTicket streamTicket, Ticket ticket) { + if (streamTicket.getNodeId().equals(flightClientManager.getLocalNodeId())) { + return streamManager.removeStreamProducer(streamTicket); + } + return flightClientManager.getFlightClient(streamTicket.getNodeId()) + .map(client -> createProxyProducer(client, ticket)) + .filter(Optional::isPresent) + .orElse(Optional.empty()); + } + + private Optional createProxyProducer(FlightClient remoteClient, Ticket ticket) { + try (FlightStream flightStream = remoteClient.getStream(ticket)) { + return Optional.ofNullable(flightStream) + .map(fs -> new ProxyStreamProducer(new FlightStreamReader(fs))) + .map(proxy -> FlightStreamManager.StreamProducerHolder.create(proxy, allocator)) + .or(() -> { + logger.warn("Remote client returned null flight stream for ticket"); + return Optional.empty(); + }); + } catch (Exception e) { + logger.warn("Failed to create proxy producer for remote stream", e); + throw CallStatus.INTERNAL.withCause(e).withDescription("Unable to create proxy stream: " + e.getMessage()).toRuntimeException(); + } + } + + private void processStreamWithProducer( + CallContext context, + FlightStreamManager.StreamProducerHolder producerHolder, + ServerStreamListener listener + ) throws IOException { + try (StreamProducer producer = producerHolder.producer()) { + StreamProducer.BatchedJob batchedJob = producer.createJob(allocator); + if (context.isCancelled()) { + handleCancellation(batchedJob, listener); + return; + } + processStream(producerHolder, batchedJob, listener); + } + } + + private void processStream( + FlightStreamManager.StreamProducerHolder producerHolder, + StreamProducer.BatchedJob batchedJob, + ServerStreamListener listener + ) { + BackpressureStrategy backpressureStrategy = new CustomCallbackBackpressureStrategy(null, batchedJob::onCancel); + backpressureStrategy.register(listener); + StreamProducer.FlushSignal flushSignal = createFlushSignal(batchedJob, listener, backpressureStrategy); + + try (VectorSchemaRoot root = producerHolder.getRoot()) { + listener.start(root); + batchedJob.run(root, flushSignal); + listener.completed(); + } + } + + private StreamProducer.FlushSignal createFlushSignal( + StreamProducer.BatchedJob batchedJob, + ServerStreamListener listener, + BackpressureStrategy backpressureStrategy + ) { + return timeout -> { + BackpressureStrategy.WaitResult result = backpressureStrategy.waitForListener(timeout.millis()); + switch (result) { + case READY: + listener.putNext(); + break; + case TIMEOUT: + batchedJob.onCancel(); + throw CallStatus.TIMED_OUT.withDescription("Stream deadline exceeded").toRuntimeException(); + case 
CANCELLED: + batchedJob.onCancel(); + throw CallStatus.CANCELLED.withDescription("Stream cancelled by client").toRuntimeException(); + default: + batchedJob.onCancel(); + logger.error("Unexpected backpressure result: {}", result); + throw CallStatus.INTERNAL.withDescription("Unexpected backpressure error: " + result).toRuntimeException(); + } + }; + } + + private void handleCancellation(StreamProducer.BatchedJob batchedJob, ServerStreamListener listener) { + try { + batchedJob.onCancel(); + throw CallStatus.CANCELLED.withDescription("Stream cancelled before processing").toRuntimeException(); + } catch (Exception e) { + logger.error("Unexpected error during cancellation", e); + throw CallStatus.INTERNAL.withCause(e).withDescription("Error during cancellation: " + e.getMessage()).toRuntimeException(); + } + } + + private FlightInfo getLocalFlightInfo(StreamTicket streamTicket, FlightDescriptor descriptor) { + FlightStreamManager.StreamProducerHolder producerHolder = streamManager.getStreamProducer(streamTicket).orElseThrow(() -> { + logger.debug("FlightInfo not found for ticket: {}", streamTicket); + return CallStatus.NOT_FOUND.withDescription("FlightInfo not found").toRuntimeException(); + }); + + Location location = flightClientManager.getFlightClientLocation(streamTicket.getNodeId()).orElseThrow(() -> { + logger.warn("Failed to determine location for node: {}", streamTicket.getNodeId()); + return CallStatus.INTERNAL.withDescription("Internal error determining location").toRuntimeException(); + }); + + try { + Ticket ticket = new Ticket(descriptor.getCommand()); + var schema = producerHolder.getRoot().getSchema(); + FlightEndpoint endpoint = new FlightEndpoint(ticket, location); + return FlightInfo.builder(schema, descriptor, Collections.singletonList(endpoint)) + .setRecords(producerHolder.producer().estimatedRowCount()) + .build(); + } catch (Exception e) { + logger.error("Failed to build FlightInfo", e); + throw CallStatus.INTERNAL.withCause(e).withDescription("Error creating FlightInfo: " + e.getMessage()).toRuntimeException(); + } + } + + private FlightInfo getRemoteFlightInfo(StreamTicket streamTicket, FlightDescriptor descriptor) { + FlightClient remoteClient = flightClientManager.getFlightClient(streamTicket.getNodeId()).orElseThrow(() -> { + logger.warn("No remote client available for node: {}", streamTicket.getNodeId()); + return CallStatus.INTERNAL.withDescription("Client doesn't support Stream").toRuntimeException(); + }); + + try { + return remoteClient.getInfo(descriptor); + } catch (Exception e) { + logger.error("Failed to get remote FlightInfo", e); + throw CallStatus.INTERNAL.withCause(e) + .withDescription("Error retrieving remote FlightInfo: " + e.getMessage()) + .toRuntimeException(); + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/CustomCallbackBackpressureStrategy.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/CustomCallbackBackpressureStrategy.java new file mode 100644 index 0000000000000..0c49ddd78ce30 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/CustomCallbackBackpressureStrategy.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.impl; + +import org.apache.arrow.flight.BackpressureStrategy; + +/** + * Base class for backpressure strategy. + */ +public class CustomCallbackBackpressureStrategy extends BackpressureStrategy.CallbackBackpressureStrategy { + private final Runnable readyCallback; + private final Runnable cancelCallback; + + /** + * Constructor for BaseBackpressureStrategy. + * + * @param readyCallback Callback to execute when the listener is ready. + * @param cancelCallback Callback to execute when the listener is cancelled. + */ + CustomCallbackBackpressureStrategy(Runnable readyCallback, Runnable cancelCallback) { + this.readyCallback = readyCallback; + this.cancelCallback = cancelCallback; + } + + /** Callback to execute when the listener is ready. */ + protected void readyCallback() { + if (readyCallback != null) { + readyCallback.run(); + } + } + + /** Callback to execute when the listener is cancelled. */ + protected void cancelCallback() { + if (cancelCallback != null) { + cancelCallback.run(); + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamManager.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamManager.java new file mode 100644 index 0000000000000..1130d59227aab --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamManager.java @@ -0,0 +1,212 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.impl; + +import org.apache.arrow.flight.FlightClient; +import org.apache.arrow.flight.FlightStream; +import org.apache.arrow.flight.Ticket; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.arrow.flight.bootstrap.FlightClientManager; +import org.opensearch.arrow.spi.StreamManager; +import org.opensearch.arrow.spi.StreamProducer; +import org.opensearch.arrow.spi.StreamReader; +import org.opensearch.arrow.spi.StreamTicket; +import org.opensearch.arrow.spi.StreamTicketFactory; +import org.opensearch.common.cache.Cache; +import org.opensearch.common.cache.CacheBuilder; +import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.tasks.TaskId; + +import java.io.IOException; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; + +/** + * FlightStreamManager is a concrete implementation of StreamManager that provides + * an abstraction layer for managing Arrow Flight streams in OpenSearch. + * It encapsulates the details of Flight client operations, allowing consumers to + * work with streams without direct exposure to Flight internals. 
+ */ +public class FlightStreamManager implements StreamManager { + private static final Logger logger = LogManager.getLogger(FlightStreamManager.class); + + private FlightStreamTicketFactory ticketFactory; + private FlightClientManager clientManager; + private Supplier allocatorSupplier; + private final Cache streamProducers; + + // Default cache settings (TODO: Make configurable via settings) + private static final TimeValue DEFAULT_CACHE_EXPIRE = TimeValue.timeValueMinutes(10); + private static final int MAX_WEIGHT = 1000; + + /** + * Holds a StreamProducer along with its metadata and resources + */ + record StreamProducerHolder(StreamProducer producer, BufferAllocator allocator, long creationTime, + AtomicReference root) { + public StreamProducerHolder { + Objects.requireNonNull(producer, "StreamProducer cannot be null"); + Objects.requireNonNull(allocator, "BufferAllocator cannot be null"); + } + + static StreamProducerHolder create(StreamProducer producer, BufferAllocator allocator) { + return new StreamProducerHolder(producer, allocator, System.nanoTime(), new AtomicReference<>(null)); + } + + boolean isExpired() { + return System.nanoTime() - creationTime > producer.getJobDeadline().getNanos(); + } + + /** + * Gets the VectorSchemaRoot associated with the StreamProducer. + * If the root is not set, it creates a new one using the provided BufferAllocator. + */ + public VectorSchemaRoot getRoot() { + return root.updateAndGet(current -> current != null ? current : producer.createRoot(allocator)); + } + } + + /** + * Constructs a new FlightStreamManager. + */ + public FlightStreamManager() { + this.streamProducers = CacheBuilder.builder() + .setExpireAfterWrite(DEFAULT_CACHE_EXPIRE) + .setMaximumWeight(MAX_WEIGHT) + .removalListener(n -> { + if (n.getRemovalReason() != RemovalReason.EXPLICIT) { + try (var unused = n.getValue().producer()) {} catch (IOException e) { + logger.error("Error closing stream producer, this may cause memory leaks.", e); + } + } + }) + .build(); + } + + /** + * Sets the allocator supplier for this FlightStreamManager. + * @param allocatorSupplier The supplier for BufferAllocator instances used for memory management. + * This parameter is required to be non-null. + */ + public void setAllocatorSupplier(Supplier allocatorSupplier) { + this.allocatorSupplier = Objects.requireNonNull(allocatorSupplier, "Allocator supplier cannot be null"); + } + + /** + * Sets the FlightClientManager for managing Flight clients. + * + * @param clientManager The FlightClientManager instance (must be non-null). + */ + public void setClientManager(FlightClientManager clientManager) { + this.clientManager = Objects.requireNonNull(clientManager, "FlightClientManager cannot be null"); + this.ticketFactory = new FlightStreamTicketFactory(clientManager::getLocalNodeId); + } + + /** + * Registers a new stream producer with the StreamManager. + * @param provider The StreamProducer instance to register. + * @param parentTaskId The parent task ID associated with the stream. + * @return A StreamTicket representing the registered stream. 
+ */ + @Override + @SuppressWarnings("unchecked") + public StreamTicket registerStream(StreamProducer provider, TaskId parentTaskId) { + StreamTicket ticket = ticketFactory.newTicket(); + try { + streamProducers.computeIfAbsent( + ticket.getTicketId(), + ticketId -> StreamProducerHolder.create( + (StreamProducer) provider, + allocatorSupplier.get() + ) + ); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + return ticket; + } + + /** + * Retrieves a StreamReader for the given StreamTicket. + * @param ticket The StreamTicket representing the stream to retrieve. + * @return A StreamReader instance for the specified stream. + */ + @Override + @SuppressWarnings("unchecked") + public StreamReader getStreamReader(StreamTicket ticket) { + FlightClient flightClient = clientManager.getFlightClient(ticket.getNodeId()) + .orElseThrow(() -> new RuntimeException("Flight client not found for node [" + ticket.getNodeId() + "].")); + FlightStream stream = flightClient.getStream(new Ticket(ticket.toBytes())); + return (StreamReader) new FlightStreamReader(stream); + } + + /** + * Retrieves the StreamTicketFactory used by this StreamManager. + * @return The StreamTicketFactory instance associated with this StreamManager. + */ + @Override + public StreamTicketFactory getStreamTicketFactory() { + return ticketFactory; + } + + /** + * Gets the StreamProducer associated with a ticket if it hasn't expired based on its deadline. + * + * @param ticket The StreamTicket identifying the stream + * @return Optional of StreamProducerHolder containing the producer if found and not expired + */ + Optional getStreamProducer(StreamTicket ticket) { + String ticketId = ticket.getTicketId(); + StreamProducerHolder holder = streamProducers.get(ticketId); + if (holder == null) { + logger.debug("No stream producer found for ticket [{}]", ticketId); + return Optional.empty(); + } + + if (holder.isExpired()) { + logger.debug("Stream producer for ticket [{}] has expired", ticketId); + streamProducers.remove(ticketId); + return Optional.empty(); + } + return Optional.of(holder); + } + + /** + * Gets and removes the StreamProducer associated with a ticket. + * Ensure that close is called on the StreamProducer after use. + * @param ticket The StreamTicket identifying the stream + * @return Optional of StreamProducerHolder containing the producer if found + */ + public Optional removeStreamProducer(StreamTicket ticket) { + String ticketId = ticket.getTicketId(); + StreamProducerHolder holder = streamProducers.get(ticketId); + if (holder == null) { + return Optional.empty(); + } + streamProducers.remove(ticketId); + return Optional.of(holder); + } + + /** + * Closes the StreamManager and cancels all associated streams. + * This method should be called when the StreamManager is no longer needed to clean up resources. + * It is recommended to implement this method to cancel all threads and clear the streamManager queue. 
+ */ + @Override + public void close() throws Exception { + streamProducers.invalidateAll(); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamReader.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamReader.java new file mode 100644 index 0000000000000..d9e366dca30e2 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamReader.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.impl; + +import org.apache.arrow.flight.FlightRuntimeException; +import org.apache.arrow.flight.FlightStream; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.opensearch.ExceptionsHelper; +import org.opensearch.arrow.spi.StreamReader; + +/** + * FlightStreamReader is a wrapper class that adapts the FlightStream interface + * to the StreamReader interface. + */ +public class FlightStreamReader implements StreamReader { + + private final FlightStream flightStream; + + /** + * Constructs a FlightStreamReader with the given FlightStream. + * + * @param flightStream The FlightStream to be adapted. + */ + public FlightStreamReader(FlightStream flightStream) { + this.flightStream = flightStream; + } + + /** + * Moves the flightStream to the next batch of data. + * @return true if there is a next batch of data, false otherwise. + * @throws FlightRuntimeException if an error occurs while advancing to the next batch like early termination of stream + */ + @Override + public boolean next() throws FlightRuntimeException { + return flightStream.next(); + } + + /** + * Returns the VectorSchemaRoot containing the current batch of data. + * @return The VectorSchemaRoot containing the current batch of data. + * @throws FlightRuntimeException if an error occurs while retrieving the root like early termination of stream + */ + @Override + public VectorSchemaRoot getRoot() throws FlightRuntimeException { + return flightStream.getRoot(); + } + + /** + * Closes the flightStream. + */ + @Override + public void close() { + ExceptionsHelper.catchAsRuntimeException(flightStream::close); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamTicket.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamTicket.java new file mode 100644 index 0000000000000..baa9e79fec6a1 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamTicket.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.impl; + +import org.opensearch.arrow.spi.StreamTicket; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Objects; + +class FlightStreamTicket implements StreamTicket { + private static final int MAX_TOTAL_SIZE = 4096; + private static final int MAX_ID_LENGTH = 256; + + private final String ticketID; + private final String nodeID; + + public FlightStreamTicket(String ticketID, String nodeID) { + this.ticketID = ticketID; + this.nodeID = nodeID; + } + + @Override + public String getTicketId() { + return ticketID; + } + + @Override + public String getNodeId() { + return nodeID; + } + + @Override + public byte[] toBytes() { + byte[] ticketIDBytes = ticketID.getBytes(StandardCharsets.UTF_8); + byte[] nodeIDBytes = nodeID.getBytes(StandardCharsets.UTF_8); + + if (ticketIDBytes.length > Short.MAX_VALUE || nodeIDBytes.length > Short.MAX_VALUE) { + throw new IllegalArgumentException("Field lengths exceed the maximum allowed size."); + } + ByteBuffer buffer = ByteBuffer.allocate(2 + ticketIDBytes.length + 2 + nodeIDBytes.length); + buffer.putShort((short) ticketIDBytes.length); + buffer.putShort((short) nodeIDBytes.length); + buffer.put(ticketIDBytes); + buffer.put(nodeIDBytes); + return Base64.getEncoder().encode(buffer.array()); + } + + static StreamTicket fromBytes(byte[] bytes) { + if (bytes == null || bytes.length < 4) { + throw new IllegalArgumentException("Invalid byte array input."); + } + + if (bytes.length > MAX_TOTAL_SIZE) { + throw new IllegalArgumentException("Input exceeds maximum allowed size"); + } + + ByteBuffer buffer = ByteBuffer.wrap(Base64.getDecoder().decode(bytes)); + + short ticketIDLength = buffer.getShort(); + if (ticketIDLength < 0 || ticketIDLength > MAX_ID_LENGTH) { + throw new IllegalArgumentException("Invalid ticketID length: " + ticketIDLength); + } + + short nodeIDLength = buffer.getShort(); + if (nodeIDLength < 0 || nodeIDLength > MAX_ID_LENGTH) { + throw new IllegalArgumentException("Invalid nodeID length: " + nodeIDLength); + } + + byte[] ticketIDBytes = new byte[ticketIDLength]; + if (buffer.remaining() < ticketIDLength) { + throw new IllegalArgumentException("Malformed byte array. Not enough data for TicketId."); + } + buffer.get(ticketIDBytes); + + byte[] nodeIDBytes = new byte[nodeIDLength]; + if (buffer.remaining() < nodeIDLength) { + throw new IllegalArgumentException("Malformed byte array. 
Not enough data for NodeId."); + } + buffer.get(nodeIDBytes); + + String ticketID = new String(ticketIDBytes, StandardCharsets.UTF_8); + String nodeID = new String(nodeIDBytes, StandardCharsets.UTF_8); + return new FlightStreamTicket(ticketID, nodeID); + } + + @Override + public int hashCode() { + return Objects.hash(ticketID, nodeID); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + FlightStreamTicket that = (FlightStreamTicket) obj; + return Objects.equals(ticketID, that.ticketID) && Objects.equals(nodeID, that.nodeID); + } + + @Override + public String toString() { + return "FlightStreamTicket{ticketID='" + ticketID + "', nodeID='" + nodeID + "'}"; + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamTicketFactory.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamTicketFactory.java new file mode 100644 index 0000000000000..473eb92cf2db3 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/FlightStreamTicketFactory.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.impl; + +import org.opensearch.arrow.spi.StreamTicket; +import org.opensearch.arrow.spi.StreamTicketFactory; +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.UUID; +import java.util.function.Supplier; + +/** + * Default implementation of StreamTicketFactory + */ +@ExperimentalApi +public class FlightStreamTicketFactory implements StreamTicketFactory { + + private final Supplier nodeId; + + /** + * Constructs a new DefaultStreamTicketFactory instance. + * + * @param nodeId A Supplier that provides the node ID for the StreamTicket + */ + public FlightStreamTicketFactory(Supplier nodeId) { + this.nodeId = nodeId; + } + + /** + * Creates a new StreamTicket with a unique ticket ID. + * + * @return A new StreamTicket instance + */ + @Override + public StreamTicket newTicket() { + return new FlightStreamTicket(generateUniqueTicket(), nodeId.get()); + } + + /** + * Deserializes a StreamTicket from its byte representation. + * + * @param bytes The byte array containing the serialized ticket data + * @return A StreamTicket instance reconstructed from the byte array + * @throws IllegalArgumentException if bytes is null or invalid + */ + @Override + public StreamTicket fromBytes(byte[] bytes) { + return FlightStreamTicket.fromBytes(bytes); + } + + private String generateUniqueTicket() { + return UUID.randomUUID().toString(); + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/ProxyStreamProducer.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/ProxyStreamProducer.java new file mode 100644 index 0000000000000..75a8d07266e07 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/ProxyStreamProducer.java @@ -0,0 +1,122 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.impl; + +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.opensearch.ExceptionsHelper; +import org.opensearch.arrow.spi.StreamProducer; +import org.opensearch.arrow.spi.StreamReader; +import org.opensearch.arrow.spi.StreamTicket; +import org.opensearch.common.unit.TimeValue; + +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * ProxyStreamProvider acts as forward proxy for FlightStream. + * It creates a BatchedJob to handle the streaming of data from the remote FlightStream. + * This is useful when stream is not present locally and needs to be fetched from a node + * retrieved using {@link StreamTicket#getNodeId()} where it is present. + */ +public class ProxyStreamProducer implements StreamProducer { + + private final StreamReader remoteStream; + + /** + * Constructs a new ProxyStreamProducer instance. + * + * @param remoteStream The remote FlightStream to be proxied. + */ + public ProxyStreamProducer(StreamReader remoteStream) { + this.remoteStream = remoteStream; + } + + /** + * Creates a VectorSchemaRoot for the remote FlightStream. + * @param allocator The allocator to use for creating vectors + * @return A VectorSchemaRoot representing the schema of the remote FlightStream + */ + @Override + public VectorSchemaRoot createRoot(BufferAllocator allocator) { + return remoteStream.getRoot(); + } + + /** + * Creates a BatchedJob + * @param allocator The allocator to use for any additional memory allocations + */ + @Override + public BatchedJob createJob(BufferAllocator allocator) { + return new ProxyBatchedJob(remoteStream); + } + + /** + * Returns the deadline for the remote FlightStream. + * Since the stream is not present locally, the deadline is set to -1. It piggybacks on remote stream expiration + * @return The deadline for the remote FlightStream + */ + @Override + public TimeValue getJobDeadline() { + return TimeValue.MINUS_ONE; + } + + /** + * Provides an estimate of the total number of rows that will be produced. + */ + @Override + public int estimatedRowCount() { + return remoteStream.getRoot().getRowCount(); + } + + /** + * Task action name + */ + @Override + public String getAction() { + // TODO get it from remote flight stream + throw new UnsupportedOperationException("Not implemented yet"); + } + + /** + * Closes the remote FlightStream. + */ + @Override + public void close() { + ExceptionsHelper.catchAsRuntimeException(remoteStream::close); + } + + static class ProxyBatchedJob implements BatchedJob { + + private final StreamReader remoteStream; + private final AtomicBoolean isCancelled = new AtomicBoolean(false); + + ProxyBatchedJob(StreamReader remoteStream) { + this.remoteStream = remoteStream; + } + + @Override + public void run(VectorSchemaRoot root, FlushSignal flushSignal) { + while (!isCancelled.get() && remoteStream.next()) { + flushSignal.awaitConsumption(TimeValue.timeValueMillis(1000)); + } + } + + @Override + public void onCancel() { + isCancelled.set(true); + } + + @Override + public boolean isCancelled() { + // Proxy stream don't have any business logic to set this flag, + // they piggyback on remote stream getting cancelled. 
+ return isCancelled.get(); + } + } +} diff --git a/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/package-info.java b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/package-info.java new file mode 100644 index 0000000000000..90ca54b44a55d --- /dev/null +++ b/plugins/arrow-flight-rpc/src/main/java/org/opensearch/arrow/flight/impl/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Core components and implementations for OpenSearch Flight service, including base producers and consumers. + */ +package org.opensearch.arrow.flight.impl; diff --git a/plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy b/plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy index 803350a578009..40d584198fd48 100644 --- a/plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/arrow-flight-rpc/src/main/plugin-metadata/plugin-security.policy @@ -6,17 +6,10 @@ * compatible open source license. */ -grant codeBase "${codebase.netty-common}" { - permission java.net.SocketPermission "*", "accept,connect,listen,resolve"; - permission java.lang.RuntimePermission "*", "setContextClassLoader"; -}; - -grant codeBase "${codebase.grpc-core}" { - permission java.net.SocketPermission "*", "accept,connect,listen,resolve"; - permission java.lang.RuntimePermission "*", "setContextClassLoader"; -}; - grant { + // Memory access + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; + // arrow flight service permissions permission java.util.PropertyPermission "arrow.allocation.manager.type", "write"; permission java.util.PropertyPermission "arrow.enable_null_check_for_get", "write"; @@ -29,7 +22,6 @@ grant { permission java.util.PropertyPermission "io.netty.tryUnsafe", "write"; // Needed for netty based arrow flight server for netty configs related to buffer allocator - permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; permission java.util.PropertyPermission "arrow.allocation.manager.type", "write"; permission java.lang.RuntimePermission "modifyThreadGroup"; @@ -39,7 +31,11 @@ grant { // Reflection access needed by Arrow permission java.lang.RuntimePermission "accessDeclaredMembers"; permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission java.lang.RuntimePermission "getClassLoader"; // Memory access permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; + + // needed by netty-common + permission java.lang.RuntimePermission "*", "setContextClassLoader"; }; diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java index 2573f0032f45b..e1d7d7d95d4a9 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/FlightStreamPluginTests.java @@ -8,8 +8,8 @@ package org.opensearch.arrow.flight; -import org.opensearch.arrow.flight.api.FlightServerInfoAction; -import org.opensearch.arrow.flight.api.NodesFlightInfoAction; +import org.opensearch.arrow.flight.api.flightinfo.FlightServerInfoAction; +import org.opensearch.arrow.flight.api.flightinfo.NodesFlightInfoAction; import 
org.opensearch.arrow.flight.bootstrap.FlightService; import org.opensearch.arrow.flight.bootstrap.FlightStreamPlugin; import org.opensearch.arrow.spi.StreamManager; @@ -27,7 +27,7 @@ import java.io.IOException; import java.util.Collection; import java.util.List; -import java.util.function.Supplier; +import java.util.Optional; import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS; import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY; @@ -76,8 +76,8 @@ public void testPluginEnabled() throws IOException { assertFalse(executorBuilders.isEmpty()); assertEquals(2, executorBuilders.size()); - Supplier streamManager = plugin.getStreamManager(); - assertNotNull(streamManager); + Optional streamManager = plugin.getStreamManager(); + assertTrue(streamManager.isPresent()); List> settings = plugin.getSettings(); assertNotNull(settings); diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/FlightServerInfoActionTests.java similarity index 98% rename from plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java rename to plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/FlightServerInfoActionTests.java index 6cb75d4a93dbe..d3115fc745475 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/FlightServerInfoActionTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/FlightServerInfoActionTests.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNode; diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/NodeFlightInfoTests.java similarity index 99% rename from plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java rename to plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/NodeFlightInfoTests.java index 2f8d7deb06f3f..59e695313c16e 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodeFlightInfoTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/NodeFlightInfoTests.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.Version; import org.opensearch.cluster.node.DiscoveryNode; diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoRequestTests.java similarity index 96% rename from plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java rename to plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoRequestTests.java index 756177423fe6f..ef8f88b78c3ee 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoRequestTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoRequestTests.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoResponseTests.java similarity index 99% rename from plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java rename to plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoResponseTests.java index 49a6cc6bacf40..707a222fe381f 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/NodesFlightInfoResponseTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/NodesFlightInfoResponseTests.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.Version; import org.opensearch.action.FailedNodeException; diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/TransportNodesFlightInfoActionTests.java similarity index 99% rename from plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java rename to plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/TransportNodesFlightInfoActionTests.java index d9d8af5920d61..6bd70eec4ad3a 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/TransportNodesFlightInfoActionTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/api/flightinfo/TransportNodesFlightInfoActionTests.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.arrow.flight.api; +package org.opensearch.arrow.flight.api.flightinfo; import org.opensearch.Version; import org.opensearch.action.FailedNodeException; diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java index ce2f0df7f5f55..e077acc8e390a 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightClientManagerTests.java @@ -11,10 +11,10 @@ import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.memory.RootAllocator; import org.opensearch.Version; -import org.opensearch.arrow.flight.api.NodeFlightInfo; -import org.opensearch.arrow.flight.api.NodesFlightInfoAction; -import org.opensearch.arrow.flight.api.NodesFlightInfoRequest; -import org.opensearch.arrow.flight.api.NodesFlightInfoResponse; +import org.opensearch.arrow.flight.api.flightinfo.NodeFlightInfo; +import org.opensearch.arrow.flight.api.flightinfo.NodesFlightInfoAction; +import org.opensearch.arrow.flight.api.flightinfo.NodesFlightInfoRequest; +import org.opensearch.arrow.flight.api.flightinfo.NodesFlightInfoResponse; import org.opensearch.arrow.flight.bootstrap.tls.SslContextProvider; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterName; @@ -42,6 +42,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -107,7 +108,9 @@ public void setUp() throws Exception { clientManager.clusterChanged(event); assertBusy(() -> { assertEquals("Flight client isn't built in time limit", 2, clientManager.getFlightClients().size()); + assertTrue("local_node should exist", clientManager.getFlightClient("local_node").isPresent()); assertNotNull("local_node should exist", clientManager.getFlightClient("local_node").get()); + assertTrue("remote_node should exist", clientManager.getFlightClient("remote_node").isPresent()); assertNotNull("remote_node should exist", clientManager.getFlightClient("remote_node").get()); }, 2, TimeUnit.SECONDS); } @@ -375,8 +378,9 @@ public void testFailedClusterUpdateButSuccessfulDirectRequest() throws Exception private void validateNodes() { for (DiscoveryNode node : state.nodes()) { - FlightClient client = clientManager.getFlightClient(node.getId()).get(); - assertNotNull("Flight client should be created for existing node", client); + Optional client = clientManager.getFlightClient(node.getId()); + assertTrue("Flight client should be created for node [" + node.getId() + "].", client.isPresent()); + assertNotNull("Flight client should be created for node [" + node.getId() + "].", client.get()); } } diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java index d8f5d5ba6b45b..a7274eb756458 100644 --- a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/bootstrap/FlightServiceTests.java @@ -94,7 +94,7 @@ public void testStartAndStop() throws Exception { testService.start(); 
testService.stop(); testService.start(); - assertNull(testService.getStreamManager()); + assertNotNull(testService.getStreamManager()); } } diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/BaseFlightProducerTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/BaseFlightProducerTests.java new file mode 100644 index 0000000000000..65caae55e9e40 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/BaseFlightProducerTests.java @@ -0,0 +1,732 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.impl; + +import org.apache.arrow.flight.FlightClient; +import org.apache.arrow.flight.FlightDescriptor; +import org.apache.arrow.flight.FlightInfo; +import org.apache.arrow.flight.FlightProducer; +import org.apache.arrow.flight.FlightRuntimeException; +import org.apache.arrow.flight.FlightStream; +import org.apache.arrow.flight.Location; +import org.apache.arrow.flight.Ticket; +import org.apache.arrow.memory.ArrowBuf; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.dictionary.DictionaryProvider; +import org.apache.arrow.vector.ipc.message.IpcOption; +import org.opensearch.arrow.flight.bootstrap.FlightClientManager; +import org.opensearch.arrow.spi.StreamProducer; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class BaseFlightProducerTests extends OpenSearchTestCase { + + private BaseFlightProducer baseFlightProducer; + private FlightStreamManager streamManager; + private StreamProducer streamProducer; + private StreamProducer.BatchedJob batchedJob; + private static final String LOCAL_NODE_ID = "localNodeId"; + private static final FlightClientManager flightClientManager = mock(FlightClientManager.class); + private final Ticket ticket = new Ticket((new FlightStreamTicket("test-ticket", LOCAL_NODE_ID)).toBytes()); + private BufferAllocator allocator; + + @LockFeatureFlag(ARROW_STREAMS) + @Override + @SuppressWarnings("unchecked") + public void setUp() throws Exception { + super.setUp(); + streamManager = mock(FlightStreamManager.class); + when(streamManager.getStreamTicketFactory()).thenReturn(new FlightStreamTicketFactory(() -> LOCAL_NODE_ID)); + when(flightClientManager.getLocalNodeId()).thenReturn(LOCAL_NODE_ID); + allocator = mock(BufferAllocator.class); + streamProducer = mock(StreamProducer.class); + batchedJob = mock(StreamProducer.BatchedJob.class); + baseFlightProducer = new BaseFlightProducer(flightClientManager, streamManager, allocator); + } + + private static class TestServerStreamListener implements FlightProducer.ServerStreamListener { + private final CountDownLatch completionLatch = new CountDownLatch(1); + private final AtomicInteger putNextCount = 
new AtomicInteger(0); + private final AtomicBoolean isCancelled = new AtomicBoolean(false); + private Throwable error; + private final AtomicBoolean dataConsumed = new AtomicBoolean(false); + private final AtomicBoolean ready = new AtomicBoolean(false); + private Runnable onReadyHandler; + private Runnable onCancelHandler; + + @Override + public void putNext() { + assertFalse(dataConsumed.get()); + putNextCount.incrementAndGet(); + dataConsumed.set(true); + } + + @Override + public boolean isReady() { + return ready.get(); + } + + public void setReady(boolean val) { + ready.set(val); + if (this.onReadyHandler != null) { + this.onReadyHandler.run(); + } + } + + @Override + public void start(VectorSchemaRoot root) { + // No-op for this test + } + + @Override + public void start(VectorSchemaRoot root, DictionaryProvider dictionaries, IpcOption option) {} + + @Override + public void putNext(ArrowBuf metadata) { + putNext(); + } + + @Override + public void putMetadata(ArrowBuf metadata) {} + + @Override + public void completed() { + completionLatch.countDown(); + } + + @Override + public void error(Throwable t) { + error = t; + completionLatch.countDown(); + } + + @Override + public boolean isCancelled() { + return isCancelled.get(); + } + + @Override + public void setOnReadyHandler(Runnable handler) { + this.onReadyHandler = handler; + } + + @Override + public void setOnCancelHandler(Runnable handler) { + this.onCancelHandler = handler; + } + + public void resetConsumptionLatch() { + dataConsumed.set(false); + } + + public boolean getDataConsumed() { + return dataConsumed.get(); + } + + public int getPutNextCount() { + return putNextCount.get(); + } + + public Throwable getError() { + return error; + } + + public void cancel() { + isCancelled.set(true); + if (this.onCancelHandler != null) { + this.onCancelHandler.run(); + } + } + } + + public void testGetStream_SuccessfulFlow() throws Exception { + final VectorSchemaRoot root = mock(VectorSchemaRoot.class); + when(streamManager.removeStreamProducer(any(FlightStreamTicket.class))).thenReturn( + Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator)) + ); + when(streamProducer.createJob(any(BufferAllocator.class))).thenReturn(batchedJob); + when(streamProducer.createRoot(any(BufferAllocator.class))).thenReturn(root); + + AtomicInteger flushCount = new AtomicInteger(0); + TestServerStreamListener listener = new TestServerStreamListener(); + doAnswer(invocation -> { + StreamProducer.FlushSignal flushSignal = invocation.getArgument(1); + for (int i = 0; i < 3; i++) { + Thread clientThread = new Thread(() -> { + listener.setReady(false); + listener.setReady(true); + }); + listener.setReady(false); + clientThread.start(); + flushSignal.awaitConsumption(TimeValue.timeValueMillis(100)); + assertTrue(listener.getDataConsumed()); + flushCount.incrementAndGet(); + listener.resetConsumptionLatch(); + } + return null; + }).when(batchedJob).run(any(VectorSchemaRoot.class), any(StreamProducer.FlushSignal.class)); + baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), ticket, listener); + + assertNull(listener.getError()); + assertEquals(3, listener.getPutNextCount()); + assertEquals(3, flushCount.get()); + + verify(streamManager).removeStreamProducer(any(FlightStreamTicket.class)); + verify(root).close(); + } + + public void testGetStream_WithSlowClient() throws Exception { + final VectorSchemaRoot root = mock(VectorSchemaRoot.class); + 
when(streamManager.removeStreamProducer(any(FlightStreamTicket.class))).thenReturn( + Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator)) + ); + when(streamProducer.createJob(any(BufferAllocator.class))).thenReturn(batchedJob); + when(streamProducer.createRoot(any(BufferAllocator.class))).thenReturn(root); + + AtomicInteger flushCount = new AtomicInteger(0); + TestServerStreamListener listener = new TestServerStreamListener(); + AtomicBoolean isCancelled = new AtomicBoolean(false); + doAnswer(invocation -> { + StreamProducer.FlushSignal flushSignal = invocation.getArgument(1); + for (int i = 0; i < 5; i++) { + Thread clientThread = new Thread(() -> { + try { + listener.setReady(false); + Thread.sleep(100); + listener.setReady(true); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + listener.setReady(false); + clientThread.start(); + flushSignal.awaitConsumption(TimeValue.timeValueMillis(300)); // Longer than client sleep + if (isCancelled.get()) { + break; + } + assertTrue(listener.getDataConsumed()); + flushCount.incrementAndGet(); + listener.resetConsumptionLatch(); + } + return null; + }).when(batchedJob).run(any(), any()); + doAnswer(invocation -> { + isCancelled.set(true); + return null; + }).when(batchedJob).onCancel(); + + baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), ticket, listener); + assertNull(listener.getError()); + assertEquals(5, listener.getPutNextCount()); + assertEquals(5, flushCount.get()); + + verify(streamManager).removeStreamProducer(any(FlightStreamTicket.class)); + verify(root).close(); + } + + public void testGetStream_WithSlowClientTimeout() throws Exception { + final VectorSchemaRoot root = mock(VectorSchemaRoot.class); + when(streamManager.removeStreamProducer(any(FlightStreamTicket.class))).thenReturn( + Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator)) + ); + when(streamProducer.createJob(any(BufferAllocator.class))).thenReturn(batchedJob); + when(streamProducer.createRoot(any(BufferAllocator.class))).thenReturn(root); + + AtomicInteger flushCount = new AtomicInteger(0); + TestServerStreamListener listener = new TestServerStreamListener(); + AtomicBoolean isCancelled = new AtomicBoolean(false); + + doAnswer(invocation -> { + StreamProducer.FlushSignal flushSignal = invocation.getArgument(1); + Thread clientThread = new Thread(() -> { + try { + listener.setReady(false); + Thread.sleep(400); // Longer than timeout + listener.setReady(true); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + listener.setReady(false); + clientThread.start(); + flushSignal.awaitConsumption(TimeValue.timeValueMillis(100)); // Shorter than client sleep + return null; + }).when(batchedJob).run(any(), any()); + doAnswer(invocation -> { + isCancelled.set(true); + return null; + }).when(batchedJob).onCancel(); + + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), ticket, listener) + ); + + assertEquals("Stream deadline exceeded", exception.getMessage()); + assertNotNull(listener.getError()); + assertEquals("Stream deadline exceeded", listener.getError().getMessage()); + assertEquals(0, listener.getPutNextCount()); + assertEquals(0, flushCount.get()); + + verify(streamManager).removeStreamProducer(any(FlightStreamTicket.class)); + verify(root).close(); + } + + public void testGetStream_WithClientCancel() throws Exception { + 
final VectorSchemaRoot root = mock(VectorSchemaRoot.class); + when(streamManager.removeStreamProducer(any(FlightStreamTicket.class))).thenReturn( + Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator)) + ); + when(streamProducer.createJob(any(BufferAllocator.class))).thenReturn(batchedJob); + when(streamProducer.createRoot(any(BufferAllocator.class))).thenReturn(root); + + AtomicInteger flushCount = new AtomicInteger(0); + TestServerStreamListener listener = new TestServerStreamListener(); + AtomicBoolean isCancelled = new AtomicBoolean(false); + + doAnswer(invocation -> { + StreamProducer.FlushSignal flushSignal = invocation.getArgument(1); + for (int i = 0; i < 5; i++) { + int finalI = i; + Thread clientThread = new Thread(() -> { + if (finalI == 4) { + listener.cancel(); + } else { + listener.setReady(true); + } + }); + listener.setReady(false); + clientThread.start(); + flushSignal.awaitConsumption(TimeValue.timeValueMillis(100)); + if (isCancelled.get()) { + break; + } + assertTrue(listener.getDataConsumed()); + flushCount.incrementAndGet(); + listener.resetConsumptionLatch(); + } + return null; + }).when(batchedJob).run(any(), any()); + doAnswer(invocation -> { + isCancelled.set(true); + return null; + }).when(batchedJob).onCancel(); + + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), ticket, listener) + ); + + assertEquals("Stream cancelled by client", exception.getMessage()); + assertNotNull(listener.getError()); + assertEquals("Stream cancelled by client", listener.getError().getMessage()); + assertEquals(4, listener.getPutNextCount()); + assertEquals(4, flushCount.get()); + + verify(streamManager).removeStreamProducer(any(FlightStreamTicket.class)); + verify(root).close(); + } + + public void testGetStream_WithUnresponsiveClient() throws Exception { + final VectorSchemaRoot root = mock(VectorSchemaRoot.class); + when(streamManager.removeStreamProducer(any(FlightStreamTicket.class))).thenReturn( + Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator)) + ); + when(streamProducer.createJob(any(BufferAllocator.class))).thenReturn(batchedJob); + when(streamProducer.createRoot(any(BufferAllocator.class))).thenReturn(root); + + AtomicInteger flushCount = new AtomicInteger(0); + TestServerStreamListener listener = new TestServerStreamListener(); + AtomicBoolean isCancelled = new AtomicBoolean(false); + + doAnswer(invocation -> { + StreamProducer.FlushSignal flushSignal = invocation.getArgument(1); + Thread clientThread = new Thread(() -> listener.setReady(false)); // Never sets ready + listener.setReady(false); + clientThread.start(); + flushSignal.awaitConsumption(TimeValue.timeValueMillis(100)); + return null; + }).when(batchedJob).run(any(), any()); + doAnswer(invocation -> { + isCancelled.set(true); + return null; + }).when(batchedJob).onCancel(); + + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), ticket, listener) + ); + + assertEquals("Stream deadline exceeded", exception.getMessage()); + assertNotNull(listener.getError()); + assertEquals("Stream deadline exceeded", listener.getError().getMessage()); + assertEquals(0, listener.getPutNextCount()); + assertEquals(0, flushCount.get()); + + verify(streamManager).removeStreamProducer(any(FlightStreamTicket.class)); + verify(root).close(); + } + + public 
void testGetStream_WithServerBackpressure() throws Exception { + final VectorSchemaRoot root = mock(VectorSchemaRoot.class); + when(streamManager.removeStreamProducer(any(FlightStreamTicket.class))).thenReturn( + Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator)) + ); + when(streamProducer.createJob(any(BufferAllocator.class))).thenReturn(batchedJob); + when(streamProducer.createRoot(any(BufferAllocator.class))).thenReturn(root); + + TestServerStreamListener listener = new TestServerStreamListener(); + AtomicInteger flushCount = new AtomicInteger(0); + doAnswer(invocation -> { + StreamProducer.FlushSignal flushSignal = invocation.getArgument(1); + for (int i = 0; i < 5; i++) { + Thread clientThread = new Thread(() -> { + listener.setReady(false); + listener.setReady(true); + }); + listener.setReady(false); + clientThread.start(); + Thread.sleep(100); // Simulate server backpressure + flushSignal.awaitConsumption(TimeValue.timeValueMillis(200)); // Longer than sleep + assertTrue(listener.getDataConsumed()); + flushCount.incrementAndGet(); + listener.resetConsumptionLatch(); + } + return null; + }).when(batchedJob).run(any(VectorSchemaRoot.class), any(StreamProducer.FlushSignal.class)); + + baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), ticket, listener); + + assertNull(listener.getError()); + assertEquals(5, listener.getPutNextCount()); + assertEquals(5, flushCount.get()); + + verify(streamManager).removeStreamProducer(any(FlightStreamTicket.class)); + verify(root).close(); + } + + public void testGetStream_WithServerError() throws Exception { + final VectorSchemaRoot root = mock(VectorSchemaRoot.class); + when(streamManager.removeStreamProducer(any(FlightStreamTicket.class))).thenReturn( + Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator)) + ); + when(streamProducer.createJob(any(BufferAllocator.class))).thenReturn(batchedJob); + when(streamProducer.createRoot(any(BufferAllocator.class))).thenReturn(root); + + TestServerStreamListener listener = new TestServerStreamListener(); + AtomicInteger flushCount = new AtomicInteger(0); + doAnswer(invocation -> { + StreamProducer.FlushSignal flushSignal = invocation.getArgument(1); + for (int i = 0; i < 5; i++) { + Thread clientThread = new Thread(() -> { + listener.setReady(false); + listener.setReady(true); + }); + listener.setReady(false); + clientThread.start(); + if (i == 4) { + throw new RuntimeException("Server error"); + } + flushSignal.awaitConsumption(TimeValue.timeValueMillis(100)); + assertTrue(listener.getDataConsumed()); + flushCount.incrementAndGet(); + listener.resetConsumptionLatch(); + } + return null; + }).when(batchedJob).run(any(VectorSchemaRoot.class), any(StreamProducer.FlushSignal.class)); + + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), ticket, listener) + ); + + assertEquals("Unexpected server error", exception.getMessage()); + assertNotNull(listener.getError()); + assertEquals("Unexpected server error", listener.getError().getMessage()); + assertEquals(4, listener.getPutNextCount()); + assertEquals(4, flushCount.get()); + + verify(streamManager).removeStreamProducer(any(FlightStreamTicket.class)); + verify(root).close(); + } + + public void testGetStream_StreamNotFound() throws Exception { + when(streamManager.removeStreamProducer(any(FlightStreamTicket.class))).thenReturn(Optional.empty()); + TestServerStreamListener 
listener = new TestServerStreamListener(); + + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), ticket, listener) + ); + + assertEquals("Stream not found", exception.getMessage()); + assertNotNull(listener.getError()); + assertEquals("Stream not found", listener.getError().getMessage()); + assertEquals(0, listener.getPutNextCount()); + + verify(streamManager).removeStreamProducer(any(FlightStreamTicket.class)); + } + + public void testGetStreamRemoteNode() throws Exception { + final String remoteNodeId = "remote-node"; + FlightStreamTicket remoteTicket = new FlightStreamTicket("test-id", remoteNodeId); + FlightClient remoteClient = mock(FlightClient.class); + FlightStream mockFlightStream = mock(FlightStream.class); + + when(flightClientManager.getFlightClient(remoteNodeId)).thenReturn(Optional.of(remoteClient)); + when(remoteClient.getStream(any(Ticket.class))).thenReturn(mockFlightStream); + TestServerStreamListener listener = new TestServerStreamListener(); + + baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), new Ticket(remoteTicket.toBytes()), listener); + verify(remoteClient).getStream(any(Ticket.class)); + } + + public void testGetStreamRemoteNodeWithNonExistentClient() throws Exception { + final String remoteNodeId = "remote-node-5"; + FlightStreamTicket remoteTicket = new FlightStreamTicket("test-id", remoteNodeId); + when(flightClientManager.getFlightClient(remoteNodeId)).thenReturn(Optional.empty()); + TestServerStreamListener listener = new TestServerStreamListener(); + + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), new Ticket(remoteTicket.toBytes()), listener) + ); + + assertEquals("Stream not found", exception.getMessage()); + assertNotNull(listener.getError()); + assertEquals("Stream not found", listener.getError().getMessage()); + assertEquals(0, listener.getPutNextCount()); + } + + public void testGetFlightInfo() { + final VectorSchemaRoot root = mock(VectorSchemaRoot.class); + when(streamManager.getStreamProducer(any(FlightStreamTicket.class))).thenReturn( + Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator)) + ); + when(streamProducer.createRoot(any(BufferAllocator.class))).thenReturn(root); + + Location location = Location.forGrpcInsecure(LOCAL_NODE_ID, 8815); + when(flightClientManager.getFlightClientLocation(LOCAL_NODE_ID)).thenReturn(Optional.of(location)); + when(streamProducer.estimatedRowCount()).thenReturn(100); + FlightDescriptor descriptor = FlightDescriptor.command(ticket.getBytes()); + FlightInfo flightInfo = baseFlightProducer.getFlightInfo(null, descriptor); + + assertNotNull(flightInfo); + assertEquals(100L, flightInfo.getRecords()); + assertEquals(1, flightInfo.getEndpoints().size()); + assertEquals(location, flightInfo.getEndpoints().getFirst().getLocations().getFirst()); + } + + public void testGetFlightInfo_NotFound() { + when(streamManager.getStreamProducer(any(FlightStreamTicket.class))).thenReturn(Optional.empty()); + FlightDescriptor descriptor = FlightDescriptor.command(ticket.getBytes()); + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getFlightInfo(null, descriptor) + ); + + assertEquals("FlightInfo not found", exception.getMessage()); + } + + public void testGetFlightInfo_LocationNotFound() { + final 
VectorSchemaRoot root = mock(VectorSchemaRoot.class); + when(streamManager.getStreamProducer(any(FlightStreamTicket.class))).thenReturn( + Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator)) + ); + when(streamProducer.createRoot(any(BufferAllocator.class))).thenReturn(root); + when(flightClientManager.getFlightClientLocation(LOCAL_NODE_ID)).thenReturn(Optional.empty()); + + FlightDescriptor descriptor = FlightDescriptor.command(ticket.getBytes()); + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getFlightInfo(null, descriptor) + ); + + assertEquals("Internal error determining location", exception.getMessage()); + } + + public void testGetFlightInfo_SchemaError() { + when(streamManager.getStreamProducer(any(FlightStreamTicket.class))) + .thenReturn(Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator))); + Location location = Location.forGrpcInsecure("localhost", 8815); + when(flightClientManager.getFlightClientLocation(LOCAL_NODE_ID)).thenReturn(Optional.of(location)); + when(streamProducer.createRoot(allocator)).thenReturn(mock(VectorSchemaRoot.class)); + when(streamProducer.estimatedRowCount()).thenThrow(new RuntimeException("Schema error")); + + FlightDescriptor descriptor = FlightDescriptor.command(ticket.getBytes()); + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getFlightInfo(null, descriptor) + ); + + assertTrue(exception.getMessage(), exception.getMessage().contains("Error creating FlightInfo: Schema error")); + } + + public void testGetFlightInfo_NonLocalNode() { + final String remoteNodeId = "remote-node"; + FlightStreamTicket remoteTicket = new FlightStreamTicket("test-id", remoteNodeId); + FlightClient remoteClient = mock(FlightClient.class); + FlightInfo mockFlightInfo = mock(FlightInfo.class); + when(flightClientManager.getFlightClient(remoteNodeId)).thenReturn(Optional.of(remoteClient)); + when(remoteClient.getInfo(any(FlightDescriptor.class))).thenReturn(mockFlightInfo); + + FlightDescriptor descriptor = FlightDescriptor.command(remoteTicket.toBytes()); + FlightInfo flightInfo = baseFlightProducer.getFlightInfo(null, descriptor); + assertEquals(mockFlightInfo, flightInfo); + } + + public void testGetFlightInfo_NonLocalNode_LocationNotFound() { + final String remoteNodeId = "remote-node-2"; + FlightStreamTicket remoteTicket = new FlightStreamTicket("test-id", remoteNodeId); + when(flightClientManager.getFlightClient(remoteNodeId)).thenReturn(Optional.empty()); + FlightDescriptor descriptor = FlightDescriptor.command(remoteTicket.toBytes()); + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getFlightInfo(null, descriptor) + ); + assertEquals("Client doesn't support Stream", exception.getMessage()); + } + + public void testGetStream_InvalidTicketFormat() throws Exception { + Ticket invalidTicket = new Ticket(new byte[] { 1, 2, 3 }); // Invalid byte array + TestServerStreamListener listener = new TestServerStreamListener(); + + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), invalidTicket, listener) + ); + + assertTrue(exception.getMessage().contains("Invalid ticket format")); + assertNotNull(listener.getError()); + assertTrue(listener.getError().getMessage().contains("Invalid ticket format")); + assertEquals(0, 
listener.getPutNextCount()); + } + + public void testGetFlightInfo_InvalidDescriptorFormat() { + FlightDescriptor invalidDescriptor = FlightDescriptor.command(new byte[] { 1, 2, 3 }); // Invalid byte array + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getFlightInfo(mock(FlightProducer.CallContext.class), invalidDescriptor) + ); + + assertTrue(exception.getMessage().contains("Invalid descriptor format")); + } + + public void testGetStream_FailedToCreateStreamProducer_Remote() throws Exception { + final String remoteNodeId = "remote-node"; + FlightStreamTicket remoteTicket = new FlightStreamTicket("test-id", remoteNodeId); + FlightClient remoteClient = mock(FlightClient.class); + + when(flightClientManager.getFlightClient(remoteNodeId)).thenReturn(Optional.of(remoteClient)); + when(remoteClient.getStream(any(Ticket.class))).thenThrow(new RuntimeException("Remote stream error")); + + TestServerStreamListener listener = new TestServerStreamListener(); + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), new Ticket(remoteTicket.toBytes()), listener) + ); + + assertTrue(exception.getMessage().contains("Unable to create proxy stream: Remote stream error")); + assertNotNull(listener.getError()); + assertTrue(listener.getError().getMessage().contains("Unable to create proxy stream: Remote stream error")); + assertEquals(0, listener.getPutNextCount()); + } + + public void testGetStream_RemoteFlightStreamNull() throws Exception { + final String remoteNodeId = "remote-node"; + FlightStreamTicket remoteTicket = new FlightStreamTicket("test-id", remoteNodeId); + FlightClient remoteClient = mock(FlightClient.class); + + when(flightClientManager.getFlightClient(remoteNodeId)).thenReturn(Optional.of(remoteClient)); + when(remoteClient.getStream(any(Ticket.class))).thenReturn(null); // Simulate null FlightStream + + TestServerStreamListener listener = new TestServerStreamListener(); + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), new Ticket(remoteTicket.toBytes()), listener) + ); + + assertEquals("Stream not found", exception.getMessage()); + assertNotNull(listener.getError()); + assertEquals("Stream not found", listener.getError().getMessage()); + assertEquals(0, listener.getPutNextCount()); + } + + public void testGetStream_CreateProxyProducerException() throws Exception { + final String remoteNodeId = "remote-node"; + FlightStreamTicket remoteTicket = new FlightStreamTicket("test-id", remoteNodeId); + FlightClient remoteClient = mock(FlightClient.class); + + when(flightClientManager.getFlightClient(remoteNodeId)).thenReturn(Optional.of(remoteClient)); + when(remoteClient.getStream(any(Ticket.class))).thenThrow(new RuntimeException("Proxy creation error")); + + TestServerStreamListener listener = new TestServerStreamListener(); + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(mock(FlightProducer.CallContext.class), new Ticket(remoteTicket.toBytes()), listener) + ); + + assertTrue(exception.getMessage().contains("Unable to create proxy stream: Proxy creation error")); + assertNotNull(listener.getError()); + assertTrue(listener.getError().getMessage().contains("Unable to create proxy stream: Proxy creation error")); + assertEquals(0, 
listener.getPutNextCount()); + } + + public void testGetStream_CancellationException() throws Exception { + FlightProducer.CallContext context = mock(FlightProducer.CallContext.class); + when(context.isCancelled()).thenReturn(true); // Simulate cancellation + + when(streamManager.removeStreamProducer(any(FlightStreamTicket.class))).thenReturn( + Optional.of(FlightStreamManager.StreamProducerHolder.create(streamProducer, allocator)) + ); + when(streamProducer.createJob(any(BufferAllocator.class))).thenReturn(batchedJob); + doThrow(new RuntimeException("Cancellation error")).when(batchedJob).onCancel(); + + TestServerStreamListener listener = new TestServerStreamListener(); + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getStream(context, ticket, listener) + ); + + assertTrue(exception.getMessage().contains("Error during cancellation: Cancellation error")); + assertNotNull(listener.getError()); + assertTrue(listener.getError().getMessage().contains("Error during cancellation: Cancellation error")); + } + + public void testGetFlightInfo_RemoteFlightInfoException() { + final String remoteNodeId = "remote-node"; + FlightStreamTicket remoteTicket = new FlightStreamTicket("test-id", remoteNodeId); + FlightClient remoteClient = mock(FlightClient.class); + + when(flightClientManager.getFlightClient(remoteNodeId)).thenReturn(Optional.of(remoteClient)); + when(remoteClient.getInfo(any(FlightDescriptor.class))).thenThrow(new RuntimeException("Remote info error")); + + FlightDescriptor descriptor = FlightDescriptor.command(remoteTicket.toBytes()); + FlightRuntimeException exception = expectThrows( + FlightRuntimeException.class, + () -> baseFlightProducer.getFlightInfo(mock(FlightProducer.CallContext.class), descriptor) + ); + + assertTrue(exception.getMessage().contains("Error retrieving remote FlightInfo: Remote info error")); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamManagerTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamManagerTests.java new file mode 100644 index 0000000000000..f194f9ba0860a --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamManagerTests.java @@ -0,0 +1,176 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.arrow.flight.impl; + +import org.apache.arrow.flight.FlightClient; +import org.apache.arrow.flight.FlightStream; +import org.apache.arrow.flight.Ticket; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.types.pojo.Schema; +import org.opensearch.arrow.flight.bootstrap.FlightClientManager; +import org.opensearch.arrow.spi.StreamProducer; +import org.opensearch.arrow.spi.StreamReader; +import org.opensearch.arrow.spi.StreamTicket; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.Optional; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class FlightStreamManagerTests extends OpenSearchTestCase { + + private FlightClient flightClient; + private FlightStreamManager flightStreamManager; + private static final String NODE_ID = "testNodeId"; + private static final String TICKET_ID = "testTicketId"; + + @Override + public void setUp() throws Exception { + super.setUp(); + flightClient = mock(FlightClient.class); + FlightClientManager clientManager = mock(FlightClientManager.class); + when(clientManager.getLocalNodeId()).thenReturn(NODE_ID); + when(clientManager.getFlightClient(NODE_ID)).thenReturn(Optional.of(flightClient)); + BufferAllocator allocator = mock(BufferAllocator.class); + flightStreamManager = new FlightStreamManager(); + flightStreamManager.setAllocatorSupplier(() -> allocator); + flightStreamManager.setClientManager(clientManager); + } + + public void testGetStreamReader() throws Exception { + StreamTicket ticket = new FlightStreamTicket(TICKET_ID, NODE_ID); + FlightStream mockFlightStream = mock(FlightStream.class); + VectorSchemaRoot mockRoot = mock(VectorSchemaRoot.class); + when(flightClient.getStream(new Ticket(ticket.toBytes()))).thenReturn(mockFlightStream); + when(mockFlightStream.getRoot()).thenReturn(mockRoot); + when(mockRoot.getSchema()).thenReturn(new Schema(Collections.emptyList())); + + StreamReader streamReader = flightStreamManager.getStreamReader(ticket); + + assertNotNull(streamReader); + assertNotNull(streamReader.getRoot()); + assertEquals(new Schema(Collections.emptyList()), streamReader.getRoot().getSchema()); + verify(flightClient).getStream(new Ticket(ticket.toBytes())); + } + + public void testGetVectorSchemaRootWithException() { + StreamTicket ticket = new FlightStreamTicket(TICKET_ID, NODE_ID); + when(flightClient.getStream(new Ticket(ticket.toBytes()))).thenThrow(new RuntimeException("Test exception")); + + expectThrows(RuntimeException.class, () -> flightStreamManager.getStreamReader(ticket)); + } + + public void testRegisterStream() throws IOException { + try (TestStreamProducer producer = new TestStreamProducer()) { + assertNotNull(flightStreamManager.getStreamTicketFactory()); + StreamTicket resultTicket = flightStreamManager.registerStream(producer, null); + assertNotNull(resultTicket); + assertTrue(resultTicket instanceof FlightStreamTicket); + FlightStreamTicket flightTicket = (FlightStreamTicket) resultTicket; + assertEquals(NODE_ID, flightTicket.getNodeId()); + assertNotNull(flightTicket.getTicketId()); + Optional retrievedProducer = flightStreamManager.getStreamProducer(resultTicket); + assertTrue(retrievedProducer.isPresent()); + assertEquals(producer, retrievedProducer.get().producer()); + 
assertNotNull(retrievedProducer.get().getRoot()); + } + } + + public void testGetStreamProducerNotFound() { + StreamTicket ticket = new FlightStreamTicket("nonexistent", NODE_ID); + assertFalse(flightStreamManager.getStreamProducer(ticket).isPresent()); + StreamTicket ticket2 = new FlightStreamTicket("nonexistent", "unknown"); + try { + flightStreamManager.getStreamReader(ticket2); + fail("RuntimeException expected"); + } catch (RuntimeException e) { + assertEquals("Flight client not found for node [unknown].", e.getMessage()); + } + } + + public void testRemoveStreamProducer() throws IOException { + try (TestStreamProducer producer = new TestStreamProducer()) { + StreamTicket resultTicket = flightStreamManager.registerStream(producer, null); + assertNotNull(resultTicket); + assertTrue(resultTicket instanceof FlightStreamTicket); + FlightStreamTicket flightTicket = (FlightStreamTicket) resultTicket; + assertEquals(NODE_ID, flightTicket.getNodeId()); + assertNotNull(flightTicket.getTicketId()); + + Optional retrievedProducer = flightStreamManager.removeStreamProducer(resultTicket); + assertTrue(retrievedProducer.isPresent()); + assertEquals(producer, retrievedProducer.get().producer()); + assertNotNull(retrievedProducer.get().getRoot()); + assertFalse(flightStreamManager.getStreamProducer(resultTicket).isPresent()); + } + } + + public void testRemoveNonExistentStreamProducer() { + StreamTicket ticket = new FlightStreamTicket("nonexistent", NODE_ID); + Optional removedProducer = flightStreamManager.removeStreamProducer(ticket); + assertFalse(removedProducer.isPresent()); + } + + public void testStreamProducerExpired() { + TestStreamProducer producer = new TestStreamProducer() { + @Override + public TimeValue getJobDeadline() { + return TimeValue.timeValueMillis(0); + } + }; + StreamTicket ticket = flightStreamManager.registerStream(producer, null); + Optional expiredProducer = flightStreamManager.getStreamProducer(ticket); + assertFalse(expiredProducer.isPresent()); + } + + public void testClose() throws Exception { + TestStreamProducer producer = new TestStreamProducer(); + StreamTicket ticket = flightStreamManager.registerStream(producer, null); + flightStreamManager.close(); + assertFalse(flightStreamManager.getStreamProducer(ticket).isPresent()); + } + + static class TestStreamProducer implements StreamProducer { + @Override + public VectorSchemaRoot createRoot(BufferAllocator bufferAllocator) { + return mock(VectorSchemaRoot.class); + } + + @Override + public BatchedJob createJob(BufferAllocator bufferAllocator) { + return null; + } + + @Override + public TimeValue getJobDeadline() { + return TimeValue.timeValueMillis(1000); + } + + @Override + public int estimatedRowCount() { + return 0; + } + + @Override + public String getAction() { + return ""; + } + + @Override + public void close() throws IOException { + + } + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamReaderTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamReaderTests.java new file mode 100644 index 0000000000000..f8bb592662a85 --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamReaderTests.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.arrow.flight.impl;
+
+import org.apache.arrow.flight.FlightStream;
+import org.apache.arrow.memory.BufferAllocator;
+import org.apache.arrow.memory.RootAllocator;
+import org.apache.arrow.vector.VectorSchemaRoot;
+import org.apache.arrow.vector.types.pojo.ArrowType;
+import org.apache.arrow.vector.types.pojo.Field;
+import org.apache.arrow.vector.types.pojo.FieldType;
+import org.apache.arrow.vector.types.pojo.Schema;
+import org.opensearch.arrow.flight.bootstrap.ServerConfig;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.List;
+
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class FlightStreamReaderTests extends OpenSearchTestCase {
+
+    private FlightStream mockFlightStream;
+
+    private FlightStreamReader iterator;
+    private VectorSchemaRoot root;
+    private BufferAllocator allocator;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        ServerConfig.init(Settings.EMPTY);
+        mockFlightStream = mock(FlightStream.class);
+        allocator = new RootAllocator(100000);
+        Field field = new Field("id", FieldType.nullable(new ArrowType.Int(32, true)), null);
+        Schema schema = new Schema(List.of(field));
+        root = VectorSchemaRoot.create(schema, allocator);
+        when(mockFlightStream.getRoot()).thenReturn(root);
+        iterator = new FlightStreamReader(mockFlightStream);
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        root.close();
+        allocator.close();
+    }
+
+    public void testNext_ReturnsTrue_WhenFlightStreamHasNext() throws Exception {
+        when(mockFlightStream.next()).thenReturn(true);
+        assertTrue(iterator.next());
+        verify(mockFlightStream).next();
+    }
+
+    public void testNext_ReturnsFalse_WhenFlightStreamHasNoNext() throws Exception {
+        when(mockFlightStream.next()).thenReturn(false);
+        assertFalse(iterator.next());
+        verify(mockFlightStream).next();
+    }
+
+    public void testGetRoot_ReturnsRootFromFlightStream() throws Exception {
+        VectorSchemaRoot returnedRoot = iterator.getRoot();
+        assertEquals(root, returnedRoot);
+        verify(mockFlightStream).getRoot();
+    }
+
+    public void testClose_CallsCloseOnFlightStream() throws Exception {
+        iterator.close();
+        verify(mockFlightStream).close();
+    }
+
+    public void testClose_WrapsExceptionInRuntimeException() throws Exception {
+        doThrow(new Exception("Test exception")).when(mockFlightStream).close();
+        assertThrows(RuntimeException.class, () -> iterator.close());
+        verify(mockFlightStream).close();
+    }
+}
diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamTicketTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamTicketTests.java
new file mode 100644
index 0000000000000..819da2826c173
--- /dev/null
+++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/FlightStreamTicketTests.java
@@ -0,0 +1,111 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.arrow.flight.impl; + +import org.opensearch.arrow.spi.StreamTicket; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class FlightStreamTicketTests extends OpenSearchTestCase { + + public void testConstructorAndGetters() { + String ticketID = "ticket123"; + String nodeID = "node456"; + StreamTicket ticket = new FlightStreamTicket(ticketID, nodeID); + + assertEquals(ticketID, ticket.getTicketId()); + assertEquals(nodeID, ticket.getNodeId()); + } + + public void testToBytes() { + StreamTicket ticket = new FlightStreamTicket("ticket123", "node456"); + byte[] bytes = ticket.toBytes(); + + assertNotNull(bytes); + assertTrue(bytes.length > 0); + + // Decode the Base64 and check the structure + byte[] decoded = Base64.getDecoder().decode(bytes); + assertEquals(2 + 9 + 2 + 7, decoded.length); // 2 shorts + "ticket123" + "node456" + } + + public void testFromBytes() { + StreamTicket original = new FlightStreamTicket("ticket123", "node456"); + byte[] bytes = original.toBytes(); + + StreamTicket reconstructed = FlightStreamTicket.fromBytes(bytes); + + assertEquals(original.getTicketId(), reconstructed.getTicketId()); + assertEquals(original.getNodeId(), reconstructed.getNodeId()); + } + + public void testToBytesWithLongStrings() { + String longString = randomAlphaOfLength(Short.MAX_VALUE + 1); + StreamTicket ticket = new FlightStreamTicket(longString, "node456"); + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, ticket::toBytes); + assertEquals("Field lengths exceed the maximum allowed size.", exception.getMessage()); + } + + public void testNullInput() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> FlightStreamTicket.fromBytes(null)); + assertEquals("Invalid byte array input.", e.getMessage()); + } + + public void testEmptyInput() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> FlightStreamTicket.fromBytes(new byte[0])); + assertEquals("Invalid byte array input.", e.getMessage()); + } + + public void testMalformedBase64() { + byte[] invalidBase64 = "Invalid Base64!@#$".getBytes(StandardCharsets.UTF_8); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> FlightStreamTicket.fromBytes(invalidBase64)); + assertEquals("Illegal base64 character 20", e.getMessage()); + } + + public void testModifiedLengthFields() { + StreamTicket original = new FlightStreamTicket("ticket123", "node456"); + byte[] bytes = original.toBytes(); + byte[] decoded = Base64.getDecoder().decode(bytes); + + // Modify the length field to be larger than actual data + decoded[0] = (byte) 0xFF; + decoded[1] = (byte) 0xFF; + + byte[] modified = Base64.getEncoder().encode(decoded); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> FlightStreamTicket.fromBytes(modified)); + assertEquals("Invalid ticketID length: -1", e.getMessage()); + } + + public void testEquals() { + StreamTicket ticket1 = new FlightStreamTicket("ticket123", "node456"); + StreamTicket ticket2 = new FlightStreamTicket("ticket123", "node456"); + StreamTicket ticket3 = new FlightStreamTicket("ticket789", "node456"); + + assertEquals(ticket1, ticket2); + assertNotEquals(ticket1, ticket3); + assertNotEquals(null, ticket1); + assertNotEquals("Not a StreamTicket", ticket1); + } + + public void testHashCode() { + StreamTicket ticket1 = new FlightStreamTicket("ticket123", "node456"); + StreamTicket 
ticket2 = new FlightStreamTicket("ticket123", "node456"); + + assertEquals(ticket1.hashCode(), ticket2.hashCode()); + } + + public void testToString() { + StreamTicket ticket = new FlightStreamTicket("ticket123", "node456"); + String expected = "FlightStreamTicket{ticketID='ticket123', nodeID='node456'}"; + assertEquals(expected, ticket.toString()); + } +} diff --git a/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/ProxyStreamProducerTests.java b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/ProxyStreamProducerTests.java new file mode 100644 index 0000000000000..55905c435365d --- /dev/null +++ b/plugins/arrow-flight-rpc/src/test/java/org/opensearch/arrow/flight/impl/ProxyStreamProducerTests.java @@ -0,0 +1,120 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.arrow.flight.impl; + +import org.apache.arrow.flight.FlightStream; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.opensearch.arrow.spi.StreamProducer; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; + +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class ProxyStreamProducerTests extends OpenSearchTestCase { + + private FlightStream mockRemoteStream; + private BufferAllocator mockAllocator; + private ProxyStreamProducer proxyStreamProducer; + + @Override + public void setUp() throws Exception { + super.setUp(); + mockRemoteStream = mock(FlightStream.class); + mockAllocator = mock(BufferAllocator.class); + proxyStreamProducer = new ProxyStreamProducer(new FlightStreamReader(mockRemoteStream)); + } + + public void testCreateRoot() throws Exception { + VectorSchemaRoot mockRoot = mock(VectorSchemaRoot.class); + when(mockRemoteStream.getRoot()).thenReturn(mockRoot); + + VectorSchemaRoot result = proxyStreamProducer.createRoot(mockAllocator); + + assertEquals(mockRoot, result); + verify(mockRemoteStream).getRoot(); + } + + public void testDefaults() { + VectorSchemaRoot mockRoot = mock(VectorSchemaRoot.class); + when(mockRoot.getRowCount()).thenReturn(100); + when(mockRemoteStream.getRoot()).thenReturn(mockRoot); + assertEquals(100, proxyStreamProducer.estimatedRowCount()); + try { + proxyStreamProducer.getAction(); + fail("Expected UnsupportedOperationException"); + } catch (UnsupportedOperationException e) { + assertEquals("Not implemented yet", e.getMessage()); + } + } + + public void testCreateJob() { + StreamProducer.BatchedJob job = proxyStreamProducer.createJob(mockAllocator); + + assertNotNull(job); + assertTrue(job instanceof ProxyStreamProducer.ProxyBatchedJob); + } + + public void testProxyBatchedJob() throws Exception { + StreamProducer.BatchedJob job = proxyStreamProducer.createJob(mockAllocator); + VectorSchemaRoot mockRoot = mock(VectorSchemaRoot.class); + StreamProducer.FlushSignal mockFlushSignal = mock(StreamProducer.FlushSignal.class); + + when(mockRemoteStream.next()).thenReturn(true, true, false); + + job.run(mockRoot, mockFlushSignal); + + verify(mockRemoteStream, times(3)).next(); + verify(mockFlushSignal, times(2)).awaitConsumption(TimeValue.timeValueMillis(1000)); + } + + 
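For context on what the mocked FlushSignal above stands in for: the awaitConsumption(TimeValue) calls verified in testProxyBatchedJob are how a real driver paces the producer against the consumer. Below is a minimal sketch of such a driver, assuming a latch-backed signal; the StreamProducer, BatchedJob, FlushSignal and TimeValue types come from the SPI shown elsewhere in this patch, while the driver class and its wiring are illustrative only, not code from this change.

    // Illustrative sketch only: not part of this patch.
    // StreamProducer/BatchedJob/FlushSignal are from org.opensearch.arrow.spi, TimeValue from common.unit.
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    final class LatchBackedDriver {
        // The producer fills the root, then blocks in awaitConsumption(timeout); the consumer
        // thread counts the latch down once it has drained the batch.
        static void drive(StreamProducer producer, BufferAllocator allocator, CountDownLatch consumed) throws Exception {
            try (VectorSchemaRoot root = producer.createRoot(allocator)) {
                StreamProducer.BatchedJob job = producer.createJob(allocator);
                StreamProducer.FlushSignal signal = timeout -> {
                    try {
                        if (consumed.await(timeout.millis(), TimeUnit.MILLISECONDS) == false) {
                            throw new RuntimeException("Stream deadline exceeded");
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(e);
                    }
                };
                job.run(root, signal);
            }
        }
    }
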
public void testProxyBatchedJobWithException() throws Exception { + StreamProducer.BatchedJob job = proxyStreamProducer.createJob(mockAllocator); + VectorSchemaRoot mockRoot = mock(VectorSchemaRoot.class); + StreamProducer.FlushSignal mockFlushSignal = mock(StreamProducer.FlushSignal.class); + + doThrow(new RuntimeException("Test exception")).when(mockRemoteStream).next(); + + try { + job.run(mockRoot, mockFlushSignal); + fail("Expected RuntimeException"); + } catch (RuntimeException e) { + assertEquals("Test exception", e.getMessage()); + } + + verify(mockRemoteStream, times(1)).next(); + } + + public void testProxyBatchedJobOnCancel() throws Exception { + StreamProducer.BatchedJob job = proxyStreamProducer.createJob(mockAllocator); + VectorSchemaRoot mockRoot = mock(VectorSchemaRoot.class); + StreamProducer.FlushSignal mockFlushSignal = mock(StreamProducer.FlushSignal.class); + when(mockRemoteStream.next()).thenReturn(true, true, false); + + // cancel the job + job.onCancel(); + job.run(mockRoot, mockFlushSignal); + verify(mockRemoteStream, times(0)).next(); + verify(mockFlushSignal, times(0)).awaitConsumption(TimeValue.timeValueMillis(1000)); + assertTrue(job.isCancelled()); + } + + @After + public void tearDown() throws Exception { + if (proxyStreamProducer != null) { + proxyStreamProducer.close(); + } + super.tearDown(); + } +} diff --git a/server/build.gradle b/server/build.gradle index d3e55c4d8f784..faf49c88c3505 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -69,7 +69,6 @@ dependencies { api project(":libs:opensearch-geo") api project(":libs:opensearch-telemetry") api project(":libs:opensearch-task-commons") - implementation project(':libs:opensearch-arrow-spi') compileOnly project(":libs:agent-sm:bootstrap") compileOnly project(':libs:opensearch-plugin-classloader') diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamManager.java b/server/src/main/java/org/opensearch/arrow/spi/StreamManager.java similarity index 100% rename from libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamManager.java rename to server/src/main/java/org/opensearch/arrow/spi/StreamManager.java diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java b/server/src/main/java/org/opensearch/arrow/spi/StreamProducer.java similarity index 94% rename from libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java rename to server/src/main/java/org/opensearch/arrow/spi/StreamProducer.java index 6ca5b8944319b..955ae9ed8913a 100644 --- a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamProducer.java +++ b/server/src/main/java/org/opensearch/arrow/spi/StreamProducer.java @@ -9,6 +9,7 @@ package org.opensearch.arrow.spi; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.tasks.TaskId; import java.io.Closeable; @@ -95,6 +96,14 @@ public interface StreamProducer extends Closeable { */ BatchedJob createJob(Allocator allocator); + /** + * Returns the deadline for the job execution. + * After this deadline, the job should be considered expired. + * + * @return TimeValue representing the job's deadline + */ + TimeValue getJobDeadline(); + /** * Provides an estimate of the total number of rows that will be produced. * @@ -111,6 +120,7 @@ public interface StreamProducer extends Closeable { /** * BatchedJob interface for producing stream data in batches. 
*/ + @ExperimentalApi interface BatchedJob { /** @@ -144,12 +154,13 @@ interface BatchedJob { * Functional interface for managing stream consumption signals. */ @FunctionalInterface + @ExperimentalApi interface FlushSignal { /** * Blocks until the current batch has been consumed or timeout occurs. * * @param timeout Maximum milliseconds to wait */ - void awaitConsumption(int timeout); + void awaitConsumption(TimeValue timeout); } } diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamReader.java b/server/src/main/java/org/opensearch/arrow/spi/StreamReader.java similarity index 100% rename from libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamReader.java rename to server/src/main/java/org/opensearch/arrow/spi/StreamReader.java diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamTicket.java b/server/src/main/java/org/opensearch/arrow/spi/StreamTicket.java similarity index 100% rename from libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamTicket.java rename to server/src/main/java/org/opensearch/arrow/spi/StreamTicket.java diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamTicketFactory.java b/server/src/main/java/org/opensearch/arrow/spi/StreamTicketFactory.java similarity index 100% rename from libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/StreamTicketFactory.java rename to server/src/main/java/org/opensearch/arrow/spi/StreamTicketFactory.java diff --git a/libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/package-info.java b/server/src/main/java/org/opensearch/arrow/spi/package-info.java similarity index 100% rename from libs/arrow-spi/src/main/java/org/opensearch/arrow/spi/package-info.java rename to server/src/main/java/org/opensearch/arrow/spi/package-info.java diff --git a/server/src/main/java/org/opensearch/common/cache/Cache.java b/server/src/main/java/org/opensearch/common/cache/Cache.java index e01a1223955ed..679c402434c15 100644 --- a/server/src/main/java/org/opensearch/common/cache/Cache.java +++ b/server/src/main/java/org/opensearch/common/cache/Cache.java @@ -566,6 +566,19 @@ private void put(K key, V value, long now) { } }; + private final Consumer>> removalConsumer = f -> { + try { + Entry entry = f.get(); + try (ReleasableLock ignored = lruLock.acquire()) { + delete(entry, RemovalReason.EXPLICIT); + } + } catch (ExecutionException e) { + // ok + } catch (InterruptedException e) { + throw new IllegalStateException(e); + } + }; + /** * Invalidate the association for the specified key. A removal notification will be issued for invalidated * entries with {@link RemovalReason} INVALIDATED. @@ -577,6 +590,17 @@ public void invalidate(K key) { segment.remove(key, invalidationConsumer); } + /** + * Removes the association for the specified key. A removal notification will be issued for removed + * entry with {@link RemovalReason} EXPLICIT. + * + * @param key the key whose mapping is to be removed from the cache + */ + public void remove(K key) { + CacheSegment segment = getCacheSegment(key); + segment.remove(key, removalConsumer); + } + /** * Invalidate the entry for the specified key and value. If the value provided is not equal to the value in * the cache, no removal will occur. 
A removal notification will be issued for invalidated diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index e7bbc3b2d5723..569216073cf2f 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -56,6 +56,7 @@ import org.opensearch.action.search.SearchTransportService; import org.opensearch.action.support.TransportAction; import org.opensearch.action.update.UpdateHelper; +import org.opensearch.arrow.spi.StreamManager; import org.opensearch.bootstrap.BootstrapCheck; import org.opensearch.bootstrap.BootstrapContext; import org.opensearch.cluster.ClusterInfoService; @@ -218,6 +219,7 @@ import org.opensearch.plugins.SearchPipelinePlugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.plugins.SecureSettingsFactory; +import org.opensearch.plugins.StreamManagerPlugin; import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.plugins.TaskManagerClientPlugin; import org.opensearch.plugins.TelemetryAwarePlugin; @@ -314,6 +316,7 @@ import java.util.stream.Stream; import static java.util.stream.Collectors.toList; +import static org.opensearch.common.util.FeatureFlags.ARROW_STREAMS_SETTING; import static org.opensearch.common.util.FeatureFlags.BACKGROUND_TASK_EXECUTION_EXPERIMENTAL; import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; @@ -1386,6 +1389,25 @@ protected Node( cacheService ); + if (FeatureFlags.isEnabled(ARROW_STREAMS_SETTING)) { + final List streamManagerPlugins = pluginsService.filterPlugins(StreamManagerPlugin.class); + + final List streamManagers = streamManagerPlugins.stream() + .map(StreamManagerPlugin::getStreamManager) + .filter(Optional::isPresent) + .map(Optional::get) + .toList(); + + if (streamManagers.size() > 1) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Only one StreamManagerPlugin can be installed. Found: %d", streamManagerPlugins.size()) + ); + } else if (streamManagers.isEmpty() == false) { + StreamManager streamManager = streamManagers.getFirst(); + streamManagerPlugins.forEach(plugin -> plugin.onStreamManagerInitialized(streamManager)); + } + } + final SearchService searchService = newSearchService( clusterService, indicesService, diff --git a/server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java b/server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java index 60bdb789b3750..929ec96950f08 100644 --- a/server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/StreamManagerPlugin.java @@ -10,11 +10,13 @@ import org.opensearch.arrow.spi.StreamManager; -import java.util.function.Supplier; +import java.util.Optional; /** * An interface for OpenSearch plugins to implement to provide a StreamManager. - * Plugins can implement this interface to provide custom StreamManager implementation. + * Plugins can implement this interface to provide custom StreamManager implementation + * or get a reference to the StreamManager instance provided by OpenSearch. + * * @see StreamManager */ public interface StreamManagerPlugin { @@ -23,5 +25,13 @@ public interface StreamManagerPlugin { * * @return The StreamManager instance */ - Supplier getStreamManager(); + default Optional getStreamManager() { + return Optional.empty(); + } + + /** + * Called when the StreamManager is initialized. 
+ * @param streamManager Supplier of the StreamManager instance + */ + default void onStreamManagerInitialized(StreamManager streamManager) {} } From bc50da2133a578d86b26172d77cfd9b341dd76d0 Mon Sep 17 00:00:00 2001 From: ajleong623 <75554284+ajleong623@users.noreply.github.com> Date: Fri, 11 Apr 2025 14:14:04 -0700 Subject: [PATCH 203/550] Cardinality Aggregation dynamic pruning NPE catch (#17775) * Update CardinalityAggregator.java Signed-off-by: ajleong623 <75554284+ajleong623@users.noreply.github.com> * Signed-off-by: Anthony Leong Signed-off-by: ajleong623 <75554284+ajleong623@users.noreply.github.com> * Update CardinalityAggregator.java Signed-off-by: ajleong623 <75554284+ajleong623@users.noreply.github.com> * Checked Cardinality Aggregation dynamic pruning null pointer exception Signed-off-by: Anthony Leong Signed-off-by: ajleong623 <75554284+ajleong623@users.noreply.github.com> --------- Signed-off-by: ajleong623 <75554284+ajleong623@users.noreply.github.com> --- .../search/aggregations/metrics/CardinalityAggregator.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java index f95dbf67fe8af..f091b28ba5184 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java @@ -381,7 +381,13 @@ public void collect(int doc, long owningBucketOrd) throws IOException { * Note: the queue may be empty or the queue top may be null after pruning */ private void prune(int doc) { + if (queue.size() == 0) { + return; + } DisiWrapper top = queue.top(); + if (top == null) { + return; + } int curTopDoc = top.doc; if (curTopDoc == doc) { do { From b37fad805109c7ad9e1fb31f1dc5ffdb390abf03 Mon Sep 17 00:00:00 2001 From: Karen X Date: Fri, 11 Apr 2025 17:26:13 -0400 Subject: [PATCH 204/550] [GRPC] Add terms query support in Search GRPC endpoint (#17888) Signed-off-by: Karen Xu Signed-off-by: Karen X --- CHANGELOG.md | 1 + .../query/AbstractQueryBuilderProtoUtils.java | 2 + .../search/query/TermsLookupProtoUtils.java | 50 +++ .../query/TermsQueryBuilderProtoUtils.java | 156 +++++++++ .../AbstractQueryBuilderProtoUtilsTests.java | 129 ++++++++ .../query/TermsLookupProtoUtilsTests.java | 138 ++++++++ .../TermsQueryBuilderProtoUtilsTests.java | 310 ++++++++++++++++++ .../index/query/AbstractQueryBuilder.java | 2 +- .../index/query/TermsQueryBuilder.java | 6 +- 9 files changed, 792 insertions(+), 2 deletions(-) create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsLookupProtoUtils.java create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsQueryBuilderProtoUtils.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsLookupProtoUtilsTests.java create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsQueryBuilderProtoUtilsTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index c7e24a16d5be7..9e30e070c9d58 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ 
-36,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add update and delete support in pull-based ingestion ([#17822](https://github.com/opensearch-project/OpenSearch/pull/17822)) - Allow maxPollSize and pollTimeout in IngestionSource to be configurable ([#17863](https://github.com/opensearch-project/OpenSearch/pull/17863)) - [Star Tree] [Search] Add query changes to support unsigned-long in star tree ([#17275](https://github.com/opensearch-project/OpenSearch/pull/17275)) +- Add TermsQuery support to Search GRPC endpoint ([#17888](https://github.com/opensearch-project/OpenSearch/pull/17888)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java index 92c0985da2a21..a0bfbc70313a1 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtils.java @@ -42,6 +42,8 @@ public static QueryBuilder parseInnerQueryBuilderProto(QueryContainer queryConta result = MatchNoneQueryBuilderProtoUtils.fromProto(queryContainer.getMatchNone()); } else if (queryContainer.getTermCount() > 0) { result = TermQueryBuilderProtoUtils.fromProto(queryContainer.getTermMap()); + } else if (queryContainer.hasTerms()) { + result = TermsQueryBuilderProtoUtils.fromProto(queryContainer.getTerms()); } // TODO add more query types else { diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsLookupProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsLookupProtoUtils.java new file mode 100644 index 0000000000000..4d85bae6aaac8 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsLookupProtoUtils.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.indices.TermsLookup; +import org.opensearch.protobufs.TermsLookupField; + +/** + * Utility class for converting TermsLookup Protocol Buffers to OpenSearch objects. + * This class provides methods to transform Protocol Buffer representations of terms lookups + * into their corresponding OpenSearch TermsLookup implementations for search operations. + */ +public class TermsLookupProtoUtils { + + private TermsLookupProtoUtils() { + // Utility class, no instances + } + + /** + * Converts a Protocol Buffer TermsLookupField to an OpenSearch TermsLookup object. 
+     * Similar to {@link TermsLookup#parseTermsLookup(XContentParser)}
+     *
+     * @param termsLookupFieldProto The Protocol Buffer TermsLookupField object containing index, id, path, and optional routing/store values
+     * @return A configured TermsLookup instance with the appropriate settings
+     */
+    protected static TermsLookup parseTermsLookup(TermsLookupField termsLookupFieldProto) {
+
+        String index = termsLookupFieldProto.getIndex();
+        String id = termsLookupFieldProto.getId();
+        String path = termsLookupFieldProto.getPath();
+
+        TermsLookup termsLookup = new TermsLookup(index, id, path);
+
+        if (termsLookupFieldProto.hasRouting()) {
+            termsLookup.routing(termsLookupFieldProto.getRouting());
+        }
+
+        if (termsLookupFieldProto.hasStore()) {
+            termsLookup.store(termsLookupFieldProto.getStore());
+        }
+
+        return termsLookup;
+    }
+}
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsQueryBuilderProtoUtils.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsQueryBuilderProtoUtils.java
new file mode 100644
index 0000000000000..34bea81af9699
--- /dev/null
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsQueryBuilderProtoUtils.java
@@ -0,0 +1,156 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+package org.opensearch.plugin.transport.grpc.proto.request.search.query;
+
+import com.google.protobuf.ProtocolStringList;
+import org.apache.lucene.util.BytesRef;
+import org.opensearch.core.common.bytes.BytesArray;
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.index.query.AbstractQueryBuilder;
+import org.opensearch.index.query.TermsQueryBuilder;
+import org.opensearch.indices.TermsLookup;
+import org.opensearch.protobufs.TermsLookupField;
+import org.opensearch.protobufs.TermsLookupFieldStringArrayMap;
+import org.opensearch.protobufs.TermsQueryField;
+import org.opensearch.protobufs.ValueType;
+
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.List;
+import java.util.Map;
+
+import static org.opensearch.index.query.AbstractQueryBuilder.maybeConvertToBytesRef;
+
+/**
+ * Utility class for converting TermsQuery Protocol Buffers to OpenSearch objects.
+ * This class provides methods to transform Protocol Buffer representations of terms queries
+ * into their corresponding OpenSearch TermsQueryBuilder implementations for search operations.
+ */
+public class TermsQueryBuilderProtoUtils {
+
+    private TermsQueryBuilderProtoUtils() {
+        // Utility class, no instances
+    }
+
+    /**
+     * Converts a Protocol Buffer TermsQueryField to an OpenSearch TermsQueryBuilder.
+     * Similar to {@link TermsQueryBuilder#fromXContent(XContentParser)}, this method
+     * parses the Protocol Buffer representation and creates a properly configured
+     * TermsQueryBuilder with the appropriate field name, values, boost, query name,
+     * and value type settings.
+     *
+     * @param termsQueryProto The Protocol Buffer TermsQueryField message, mapping the field name to its terms or terms lookup
+     * @return A configured TermsQueryBuilder instance
+     * @throws IllegalArgumentException if the terms query specifies more than one field,
+     * if the field value type is not supported, or if the terms query field value is not recognized
+     */
+    protected static TermsQueryBuilder fromProto(TermsQueryField termsQueryProto) {
+
+        String fieldName = null;
+        List<Object> values = null;
+        TermsLookup termsLookup = null;
+
+        String queryName = null;
+        float boost = AbstractQueryBuilder.DEFAULT_BOOST;
+        String valueTypeStr = TermsQueryBuilder.ValueType.DEFAULT.name();
+
+        if (termsQueryProto.hasBoost()) {
+            boost = termsQueryProto.getBoost();
+        }
+
+        if (termsQueryProto.hasName()) {
+            queryName = termsQueryProto.getName();
+        }
+
+        // TODO: remove this parameter when backporting to under OS 2.17
+        if (termsQueryProto.hasValueType()) {
+            valueTypeStr = parseValueType(termsQueryProto.getValueType()).name();
+        }
+
+        if (termsQueryProto.getTermsLookupFieldStringArrayMapMap().size() > 1) {
+            throw new IllegalArgumentException("[" + TermsQueryBuilder.NAME + "] query does not support more than one field. ");
+        }
+
+        for (Map.Entry<String, TermsLookupFieldStringArrayMap> entry : termsQueryProto.getTermsLookupFieldStringArrayMapMap().entrySet()) {
+            fieldName = entry.getKey();
+            TermsLookupFieldStringArrayMap termsLookupFieldStringArrayMap = entry.getValue();
+
+            if (termsLookupFieldStringArrayMap.hasTermsLookupField()) {
+                TermsLookupField termsLookupField = termsLookupFieldStringArrayMap.getTermsLookupField();
+                termsLookup = TermsLookupProtoUtils.parseTermsLookup(termsLookupField);
+            } else if (termsLookupFieldStringArrayMap.hasStringArray()) {
+                values = parseValues(termsLookupFieldStringArrayMap.getStringArray().getStringArrayList());
+            } else {
+                throw new IllegalArgumentException("termsLookupField and stringArray fields cannot both be null");
+            }
+        }
+
+        TermsQueryBuilder.ValueType valueType = TermsQueryBuilder.ValueType.fromString(valueTypeStr);
+
+        if (valueType == TermsQueryBuilder.ValueType.BITMAP) {
+            if (values != null && values.size() == 1 && values.get(0) instanceof BytesRef) {
+                values.set(0, new BytesArray(Base64.getDecoder().decode(((BytesRef) values.get(0)).utf8ToString())));
+            } else if (termsLookup == null) {
+                throw new IllegalArgumentException(
+                    "Invalid value for bitmap type: Expected a single-element array with a base64 encoded serialized bitmap."
+                );
+            }
+        }
+
+        TermsQueryBuilder termsQueryBuilder;
+        if (values == null) {
+            termsQueryBuilder = new TermsQueryBuilder(fieldName, termsLookup);
+        } else if (termsLookup == null) {
+            termsQueryBuilder = new TermsQueryBuilder(fieldName, values);
+        } else {
+            throw new IllegalArgumentException("values and termsLookup cannot both be null");
+        }
+
+        return termsQueryBuilder.boost(boost).queryName(queryName).valueType(valueType);
+    }
+
+    /**
+     * Parses a protobuf ValueType into the corresponding {@link TermsQueryBuilder.ValueType}.
+     *
+     * See {@link org.opensearch.index.query.TermsQueryBuilder.ValueType#fromString(String)}
+     *
+     * @param valueType the Protocol Buffer ValueType to convert
+     * @return the corresponding TermsQueryBuilder.ValueType; unrecognized or unspecified values fall back to DEFAULT
+     */
+    public static TermsQueryBuilder.ValueType parseValueType(ValueType valueType) {
+        switch (valueType) {
+            case VALUE_TYPE_BITMAP:
+                return TermsQueryBuilder.ValueType.BITMAP;
+            case VALUE_TYPE_DEFAULT:
+                return TermsQueryBuilder.ValueType.DEFAULT;
+            case VALUE_TYPE_UNSPECIFIED:
+            default:
+                return TermsQueryBuilder.ValueType.DEFAULT;
+        }
+    }
+
+    /**
+     * Similar to {@link TermsQueryBuilder#parseValues(XContentParser)}
+     * @param termsLookupFieldStringArray the string values from the Protocol Buffer message
+     * @return the parsed values, converted to BytesRef where applicable
+     * @throws IllegalArgumentException if a value is null
+     */
+    static List<Object> parseValues(ProtocolStringList termsLookupFieldStringArray) throws IllegalArgumentException {
+        List<Object> values = new ArrayList<>();
+
+        for (Object value : termsLookupFieldStringArray) {
+            if (value == null) {
+                throw new IllegalArgumentException("No value specified for terms query");
+            }
+            Object convertedValue = maybeConvertToBytesRef(value);
+            values.add(convertedValue);
+        }
+        return values;
+    }
+}
diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtilsTests.java
new file mode 100644
index 0000000000000..0419f3f4976bb
--- /dev/null
+++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/AbstractQueryBuilderProtoUtilsTests.java
@@ -0,0 +1,129 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.MatchNoneQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.protobufs.FieldValue; +import org.opensearch.protobufs.MatchAllQuery; +import org.opensearch.protobufs.MatchNoneQuery; +import org.opensearch.protobufs.QueryContainer; +import org.opensearch.protobufs.StringArray; +import org.opensearch.protobufs.TermQuery; +import org.opensearch.protobufs.TermsLookupFieldStringArrayMap; +import org.opensearch.protobufs.TermsQueryField; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +public class AbstractQueryBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testParseInnerQueryBuilderProtoWithMatchAll() { + // Create a QueryContainer with MatchAllQuery + MatchAllQuery matchAllQuery = MatchAllQuery.newBuilder().build(); + QueryContainer queryContainer = QueryContainer.newBuilder().setMatchAll(matchAllQuery).build(); + + // Call parseInnerQueryBuilderProto + QueryBuilder queryBuilder = AbstractQueryBuilderProtoUtils.parseInnerQueryBuilderProto(queryContainer); + + // Verify the result + assertNotNull("QueryBuilder should not be null", queryBuilder); + assertTrue("QueryBuilder should be a MatchAllQueryBuilder", queryBuilder instanceof MatchAllQueryBuilder); + } + + public void testParseInnerQueryBuilderProtoWithMatchNone() { + // Create a QueryContainer with MatchNoneQuery + MatchNoneQuery matchNoneQuery = MatchNoneQuery.newBuilder().build(); + QueryContainer queryContainer = QueryContainer.newBuilder().setMatchNone(matchNoneQuery).build(); + + // Call parseInnerQueryBuilderProto + QueryBuilder queryBuilder = AbstractQueryBuilderProtoUtils.parseInnerQueryBuilderProto(queryContainer); + + // Verify the result + assertNotNull("QueryBuilder should not be null", queryBuilder); + assertTrue("QueryBuilder should be a MatchNoneQueryBuilder", queryBuilder instanceof MatchNoneQueryBuilder); + } + + public void testParseInnerQueryBuilderProtoWithTerm() { + // Create a QueryContainer with Term query + Map termMap = new HashMap<>(); + + // Create a FieldValue for the term value + FieldValue fieldValue = FieldValue.newBuilder().setStringValue("test-value").build(); + + // Create a TermQuery with the FieldValue + TermQuery termQuery = TermQuery.newBuilder().setValue(fieldValue).build(); + + termMap.put("test-field", termQuery); + + QueryContainer queryContainer = QueryContainer.newBuilder().putAllTerm(termMap).build(); + + // Call parseInnerQueryBuilderProto + QueryBuilder queryBuilder = AbstractQueryBuilderProtoUtils.parseInnerQueryBuilderProto(queryContainer); + + // Verify the result + assertNotNull("QueryBuilder should not be null", queryBuilder); + assertTrue("QueryBuilder should be a TermQueryBuilder", queryBuilder instanceof TermQueryBuilder); + TermQueryBuilder termQueryBuilder = (TermQueryBuilder) queryBuilder; + assertEquals("Field name should match", "test-field", termQueryBuilder.fieldName()); + assertEquals("Value should match", "test-value", termQueryBuilder.value()); + } + + public void testParseInnerQueryBuilderProtoWithTerms() { + // Create a StringArray for terms values + StringArray stringArray = StringArray.newBuilder().addStringArray("value1").addStringArray("value2").build(); + + // Create a 
TermsLookupFieldStringArrayMap + TermsLookupFieldStringArrayMap termsLookupFieldStringArrayMap = TermsLookupFieldStringArrayMap.newBuilder() + .setStringArray(stringArray) + .build(); + + // Create a map for TermsLookupFieldStringArrayMap + Map termsLookupFieldStringArrayMapMap = new HashMap<>(); + termsLookupFieldStringArrayMapMap.put("test-field", termsLookupFieldStringArrayMap); + + // Create a TermsQueryField + TermsQueryField termsQueryField = TermsQueryField.newBuilder() + .putAllTermsLookupFieldStringArrayMap(termsLookupFieldStringArrayMapMap) + .build(); + + // Create a QueryContainer with Terms query + QueryContainer queryContainer = QueryContainer.newBuilder().setTerms(termsQueryField).build(); + + // Call parseInnerQueryBuilderProto + QueryBuilder queryBuilder = AbstractQueryBuilderProtoUtils.parseInnerQueryBuilderProto(queryContainer); + + // Verify the result + assertNotNull("QueryBuilder should not be null", queryBuilder); + assertTrue("QueryBuilder should be a TermsQueryBuilder", queryBuilder instanceof TermsQueryBuilder); + TermsQueryBuilder termsQueryBuilder = (TermsQueryBuilder) queryBuilder; + assertEquals("Field name should match", "test-field", termsQueryBuilder.fieldName()); + assertEquals("Values size should match", 2, termsQueryBuilder.values().size()); + assertEquals("First value should match", "value1", termsQueryBuilder.values().get(0)); + assertEquals("Second value should match", "value2", termsQueryBuilder.values().get(1)); + } + + public void testParseInnerQueryBuilderProtoWithUnsupportedQueryType() { + // Create an empty QueryContainer (no query type specified) + QueryContainer queryContainer = QueryContainer.newBuilder().build(); + + // Call parseInnerQueryBuilderProto, should throw UnsupportedOperationException + UnsupportedOperationException exception = expectThrows( + UnsupportedOperationException.class, + () -> AbstractQueryBuilderProtoUtils.parseInnerQueryBuilderProto(queryContainer) + ); + + // Verify the exception message + assertTrue("Exception message should mention 'not supported yet'", exception.getMessage().contains("not supported yet")); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsLookupProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsLookupProtoUtilsTests.java new file mode 100644 index 0000000000000..9475897452962 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsLookupProtoUtilsTests.java @@ -0,0 +1,138 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.indices.TermsLookup; +import org.opensearch.protobufs.TermsLookupField; +import org.opensearch.test.OpenSearchTestCase; + +public class TermsLookupProtoUtilsTests extends OpenSearchTestCase { + + public void testParseTermsLookupWithBasicFields() { + // Create a TermsLookupField instance with basic fields + TermsLookupField termsLookupField = TermsLookupField.newBuilder() + .setIndex("test_index") + .setId("test_id") + .setPath("test_path") + .build(); + + // Call the method under test + TermsLookup termsLookup = TermsLookupProtoUtils.parseTermsLookup(termsLookupField); + + // Verify the result + assertNotNull("TermsLookup should not be null", termsLookup); + assertEquals("Index should match", "test_index", termsLookup.index()); + assertEquals("ID should match", "test_id", termsLookup.id()); + assertEquals("Path should match", "test_path", termsLookup.path()); + assertNull("Routing should be null", termsLookup.routing()); + assertFalse("Store should be false by default", termsLookup.store()); + } + + public void testParseTermsLookupWithStore() { + // Create a TermsLookupField instance with store field + TermsLookupField termsLookupField = TermsLookupField.newBuilder() + .setIndex("test_index") + .setId("test_id") + .setPath("test_path") + .setStore(true) + .build(); + + // Call the method under test + TermsLookup termsLookup = TermsLookupProtoUtils.parseTermsLookup(termsLookupField); + + // Verify the result + assertNotNull("TermsLookup should not be null", termsLookup); + assertEquals("Index should match", "test_index", termsLookup.index()); + assertEquals("ID should match", "test_id", termsLookup.id()); + assertEquals("Path should match", "test_path", termsLookup.path()); + assertNull("Routing should be null", termsLookup.routing()); + assertTrue("Store should be true", termsLookup.store()); + } + + public void testParseTermsLookupWithNullInput() { + // Call the method under test with null input, should throw NullPointerException + NullPointerException exception = expectThrows(NullPointerException.class, () -> TermsLookupProtoUtils.parseTermsLookup(null)); + } + + // This test verifies the bug fix for using index instead of id + public void testParseTermsLookupWithDifferentIndexAndId() { + // Create a TermsLookupField instance with different index and id values + TermsLookupField termsLookupField = TermsLookupField.newBuilder() + .setIndex("test_index") + .setId("test_id") + .setPath("test_path") + .build(); + + // Call the method under test + TermsLookup termsLookup = TermsLookupProtoUtils.parseTermsLookup(termsLookupField); + + // Verify the result + assertNotNull("TermsLookup should not be null", termsLookup); + assertEquals("Index should match", "test_index", termsLookup.index()); + assertEquals("ID should match", "test_id", termsLookup.id()); + assertEquals("Path should match", "test_path", termsLookup.path()); + } + + public void testParseTermsLookupWithEmptyFields() { + // Create a TermsLookupField instance with empty fields + TermsLookupField termsLookupField = TermsLookupField.newBuilder().setIndex("").setId("").setPath("").build(); + + // Call the method under test + TermsLookup termsLookup = TermsLookupProtoUtils.parseTermsLookup(termsLookupField); + + // Verify the result + assertNotNull("TermsLookup should not be null", termsLookup); + assertEquals("Index should be empty", "", termsLookup.index()); + assertEquals("ID should be empty", "", termsLookup.id()); + 
assertEquals("Path should be empty", "", termsLookup.path()); + } + + public void testParseTermsLookupWithRouting() { + // Create a TermsLookupField instance with routing field + TermsLookupField termsLookupField = TermsLookupField.newBuilder() + .setIndex("test_index") + .setId("test_id") + .setPath("test_path") + .setRouting("test_routing") + .build(); + + // Call the method under test + TermsLookup termsLookup = TermsLookupProtoUtils.parseTermsLookup(termsLookupField); + + // Verify the result + assertNotNull("TermsLookup should not be null", termsLookup); + assertEquals("Index should match", "test_index", termsLookup.index()); + assertEquals("ID should match", "test_id", termsLookup.id()); + assertEquals("Path should match", "test_path", termsLookup.path()); + assertEquals("Routing should match", "test_routing", termsLookup.routing()); + assertFalse("Store should be false by default", termsLookup.store()); + } + + public void testParseTermsLookupWithRoutingAndStore() { + // Create a TermsLookupField instance with both routing and store fields + TermsLookupField termsLookupField = TermsLookupField.newBuilder() + .setIndex("test_index") + .setId("test_id") + .setPath("test_path") + .setRouting("test_routing") + .setStore(true) + .build(); + + // Call the method under test + TermsLookup termsLookup = TermsLookupProtoUtils.parseTermsLookup(termsLookupField); + + // Verify the result + assertNotNull("TermsLookup should not be null", termsLookup); + assertEquals("Index should match", "test_index", termsLookup.index()); + assertEquals("ID should match", "test_id", termsLookup.id()); + assertEquals("Path should match", "test_path", termsLookup.path()); + assertEquals("Routing should match", "test_routing", termsLookup.routing()); + assertTrue("Store should be true", termsLookup.store()); + } +} diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsQueryBuilderProtoUtilsTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsQueryBuilderProtoUtilsTests.java new file mode 100644 index 0000000000000..e117d24a62188 --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/proto/request/search/query/TermsQueryBuilderProtoUtilsTests.java @@ -0,0 +1,310 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.transport.grpc.proto.request.search.query; + +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.indices.TermsLookup; +import org.opensearch.protobufs.StringArray; +import org.opensearch.protobufs.TermsLookupField; +import org.opensearch.protobufs.TermsLookupFieldStringArrayMap; +import org.opensearch.protobufs.TermsQueryField; +import org.opensearch.protobufs.ValueType; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class TermsQueryBuilderProtoUtilsTests extends OpenSearchTestCase { + + public void testFromProtoWithStringValues() { + // Create a StringArray + StringArray stringArray = StringArray.newBuilder() + .addStringArray("value1") + .addStringArray("value2") + .addStringArray("value3") + .build(); + + // Create a TermsLookupFieldStringArrayMap + TermsLookupFieldStringArrayMap termsLookupFieldStringArrayMap = TermsLookupFieldStringArrayMap.newBuilder() + .setStringArray(stringArray) + .build(); + + // Create a map for TermsLookupFieldStringArrayMap + Map termsLookupFieldStringArrayMapMap = new HashMap<>(); + termsLookupFieldStringArrayMapMap.put("test_field", termsLookupFieldStringArrayMap); + + // Create a TermsQueryField + TermsQueryField termsQueryField = TermsQueryField.newBuilder() + .putAllTermsLookupFieldStringArrayMap(termsLookupFieldStringArrayMapMap) + .setBoost(2.0f) + .setName("test_query") + .build(); + + // Call the method under test + TermsQueryBuilder termsQueryBuilder = TermsQueryBuilderProtoUtils.fromProto(termsQueryField); + + // Verify the result + assertNotNull("TermsQueryBuilder should not be null", termsQueryBuilder); + assertEquals("Field name should match", "test_field", termsQueryBuilder.fieldName()); + List values = termsQueryBuilder.values(); + assertNotNull("Values should not be null", values); + assertEquals("Values size should match", 3, values.size()); + assertEquals("First value should match", "value1", values.get(0)); + assertEquals("Second value should match", "value2", values.get(1)); + assertEquals("Third value should match", "value3", values.get(2)); + assertEquals("Boost should match", 2.0f, termsQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", termsQueryBuilder.queryName()); + } + + public void testFromProtoWithTermsLookup() { + // Create a TermsLookupField + TermsLookupField termsLookupField = TermsLookupField.newBuilder() + .setIndex("test_index") + .setId("test_id") + .setPath("test_path") + .build(); + + // Create a TermsLookupFieldStringArrayMap + TermsLookupFieldStringArrayMap termsLookupFieldStringArrayMap = TermsLookupFieldStringArrayMap.newBuilder() + .setTermsLookupField(termsLookupField) + .build(); + + // Create a map for TermsLookupFieldStringArrayMap + Map termsLookupFieldStringArrayMapMap = new HashMap<>(); + termsLookupFieldStringArrayMapMap.put("test_field", termsLookupFieldStringArrayMap); + + // Create a TermsQueryField + TermsQueryField termsQueryField = TermsQueryField.newBuilder() + .putAllTermsLookupFieldStringArrayMap(termsLookupFieldStringArrayMapMap) + .setBoost(2.0f) + .setName("test_query") + .build(); + + // Call the method under test + TermsQueryBuilder termsQueryBuilder = TermsQueryBuilderProtoUtils.fromProto(termsQueryField); + + // Verify the result + assertNotNull("TermsQueryBuilder should not be null", termsQueryBuilder); + assertEquals("Field name should 
match", "test_field", termsQueryBuilder.fieldName()); + // assertNull("Values should be null", termsQueryBuilder.values()); + + TermsLookup termsLookup = termsQueryBuilder.termsLookup(); + assertNotNull("TermsLookup should not be null", termsLookup); + assertEquals("TermsLookup index should match", "test_index", termsLookup.index()); + assertEquals("TermsLookup id should match", "test_id", termsLookup.id()); + assertEquals("TermsLookup path should match", "test_path", termsLookup.path()); + assertEquals("Boost should match", 2.0f, termsQueryBuilder.boost(), 0.0f); + assertEquals("Query name should match", "test_query", termsQueryBuilder.queryName()); + } + + public void testFromProtoWithDefaultValues() { + // Create a StringArray + StringArray stringArray = StringArray.newBuilder().addStringArray("value1").build(); + + // Create a TermsLookupFieldStringArrayMap + TermsLookupFieldStringArrayMap termsLookupFieldStringArrayMap = TermsLookupFieldStringArrayMap.newBuilder() + .setStringArray(stringArray) + .build(); + + // Create a map for TermsLookupFieldStringArrayMap + Map termsLookupFieldStringArrayMapMap = new HashMap<>(); + termsLookupFieldStringArrayMapMap.put("test_field", termsLookupFieldStringArrayMap); + + // Create a TermsQueryField with minimal values + TermsQueryField termsQueryField = TermsQueryField.newBuilder() + .putAllTermsLookupFieldStringArrayMap(termsLookupFieldStringArrayMapMap) + .build(); + + // Call the method under test + TermsQueryBuilder termsQueryBuilder = TermsQueryBuilderProtoUtils.fromProto(termsQueryField); + + // Verify the result + assertNotNull("TermsQueryBuilder should not be null", termsQueryBuilder); + assertEquals("Field name should match", "test_field", termsQueryBuilder.fieldName()); + List values = termsQueryBuilder.values(); + assertNotNull("Values should not be null", values); + assertEquals("Values size should match", 1, values.size()); + assertEquals("First value should match", "value1", values.get(0)); + assertEquals("Boost should be default", 1.0f, termsQueryBuilder.boost(), 0.0f); + assertNull("Query name should be null", termsQueryBuilder.queryName()); + } + + public void testFromProtoWithTooManyFields() { + // Create a TermsLookupFieldStringArrayMap + TermsLookupFieldStringArrayMap termsLookupFieldStringArrayMap = TermsLookupFieldStringArrayMap.newBuilder().build(); + + // Create a map for TermsLookupFieldStringArrayMap with too many entries + Map termsLookupFieldStringArrayMapMap = new HashMap<>(); + termsLookupFieldStringArrayMapMap.put("field1", termsLookupFieldStringArrayMap); + termsLookupFieldStringArrayMapMap.put("field2", termsLookupFieldStringArrayMap); + + // Create a TermsQueryField + TermsQueryField termsQueryField = TermsQueryField.newBuilder() + .putAllTermsLookupFieldStringArrayMap(termsLookupFieldStringArrayMapMap) + .build(); + + // Call the method under test, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> TermsQueryBuilderProtoUtils.fromProto(termsQueryField) + ); + + assertTrue( + "Exception message should mention not supporting more than one field", + exception.getMessage().contains("does not support more than one field") + ); + } + + public void testFromProtoWithNullInput() { + // Call the method under test with null input, should throw NullPointerException + NullPointerException exception = expectThrows(NullPointerException.class, () -> TermsQueryBuilderProtoUtils.fromProto(null)); + } + + public void testFromProtoWithValueTypeBitmap() 
{ + // Create a base64 encoded bitmap + String base64Bitmap = Base64.getEncoder().encodeToString("test_bitmap".getBytes(StandardCharsets.UTF_8)); + + // Create a StringArray + StringArray stringArray = StringArray.newBuilder().addStringArray(base64Bitmap).build(); + + // Create a TermsLookupFieldStringArrayMap + TermsLookupFieldStringArrayMap termsLookupFieldStringArrayMap = TermsLookupFieldStringArrayMap.newBuilder() + .setStringArray(stringArray) + .build(); + + // Create a map for TermsLookupFieldStringArrayMap + Map termsLookupFieldStringArrayMapMap = new HashMap<>(); + termsLookupFieldStringArrayMapMap.put("test_field", termsLookupFieldStringArrayMap); + + // Create a TermsQueryField + TermsQueryField termsQueryField = TermsQueryField.newBuilder() + .putAllTermsLookupFieldStringArrayMap(termsLookupFieldStringArrayMapMap) + .setValueType(ValueType.VALUE_TYPE_BITMAP) + .build(); + + // Call the method under test + TermsQueryBuilder termsQueryBuilder = TermsQueryBuilderProtoUtils.fromProto(termsQueryField); + + // Verify the result + assertNotNull("TermsQueryBuilder should not be null", termsQueryBuilder); + assertEquals("Field name should match", "test_field", termsQueryBuilder.fieldName()); + } + + public void testFromProtoWithValueTypeDefault() { + // Create a StringArray + StringArray stringArray = StringArray.newBuilder().addStringArray("value1").build(); + + // Create a TermsLookupFieldStringArrayMap + TermsLookupFieldStringArrayMap termsLookupFieldStringArrayMap = TermsLookupFieldStringArrayMap.newBuilder() + .setStringArray(stringArray) + .build(); + + // Create a map for TermsLookupFieldStringArrayMap + Map termsLookupFieldStringArrayMapMap = new HashMap<>(); + termsLookupFieldStringArrayMapMap.put("test_field", termsLookupFieldStringArrayMap); + + // Create a TermsQueryField + TermsQueryField termsQueryField = TermsQueryField.newBuilder() + .putAllTermsLookupFieldStringArrayMap(termsLookupFieldStringArrayMapMap) + .setValueType(ValueType.VALUE_TYPE_DEFAULT) + .build(); + + // Call the method under test + TermsQueryBuilder termsQueryBuilder = TermsQueryBuilderProtoUtils.fromProto(termsQueryField); + + // Verify the result + assertNotNull("TermsQueryBuilder should not be null", termsQueryBuilder); + assertEquals("Field name should match", "test_field", termsQueryBuilder.fieldName()); + } + + public void testFromProtoWithValueTypeUnspecified() { + // Create a StringArray + StringArray stringArray = StringArray.newBuilder().addStringArray("value1").build(); + + // Create a TermsLookupFieldStringArrayMap + TermsLookupFieldStringArrayMap termsLookupFieldStringArrayMap = TermsLookupFieldStringArrayMap.newBuilder() + .setStringArray(stringArray) + .build(); + + // Create a map for TermsLookupFieldStringArrayMap + Map termsLookupFieldStringArrayMapMap = new HashMap<>(); + termsLookupFieldStringArrayMapMap.put("test_field", termsLookupFieldStringArrayMap); + + // Create a TermsQueryField + TermsQueryField termsQueryField = TermsQueryField.newBuilder() + .putAllTermsLookupFieldStringArrayMap(termsLookupFieldStringArrayMapMap) + .setValueType(ValueType.VALUE_TYPE_UNSPECIFIED) + .build(); + + // Call the method under test + TermsQueryBuilder termsQueryBuilder = TermsQueryBuilderProtoUtils.fromProto(termsQueryField); + + // Verify the result + assertNotNull("TermsQueryBuilder should not be null", termsQueryBuilder); + assertEquals("Field name should match", "test_field", termsQueryBuilder.fieldName()); + } + + public void testParseValueTypeWithBitmap() { + // Call the method under test + 
TermsQueryBuilder.ValueType valueType = TermsQueryBuilderProtoUtils.parseValueType(ValueType.VALUE_TYPE_BITMAP); + + // Verify the result + assertEquals("Value type should be BITMAP", TermsQueryBuilder.ValueType.BITMAP, valueType); + } + + public void testParseValueTypeWithDefault() { + // Call the method under test + TermsQueryBuilder.ValueType valueType = TermsQueryBuilderProtoUtils.parseValueType(ValueType.VALUE_TYPE_DEFAULT); + + // Verify the result + assertEquals("Value type should be DEFAULT", TermsQueryBuilder.ValueType.DEFAULT, valueType); + } + + public void testParseValueTypeWithUnspecified() { + // Call the method under test + TermsQueryBuilder.ValueType valueType = TermsQueryBuilderProtoUtils.parseValueType(ValueType.VALUE_TYPE_UNSPECIFIED); + + // Verify the result + assertEquals("Value type should be DEFAULT for UNSPECIFIED", TermsQueryBuilder.ValueType.DEFAULT, valueType); + } + + public void testFromProtoWithInvalidBitmapValue() { + // Create a StringArray with multiple values for bitmap type + StringArray stringArray = StringArray.newBuilder().addStringArray("value1").addStringArray("value2").build(); + + // Create a TermsLookupFieldStringArrayMap + TermsLookupFieldStringArrayMap termsLookupFieldStringArrayMap = TermsLookupFieldStringArrayMap.newBuilder() + .setStringArray(stringArray) + .build(); + + // Create a map for TermsLookupFieldStringArrayMap + Map termsLookupFieldStringArrayMapMap = new HashMap<>(); + termsLookupFieldStringArrayMapMap.put("test_field", termsLookupFieldStringArrayMap); + + // Create a TermsQueryField + TermsQueryField termsQueryField = TermsQueryField.newBuilder() + .putAllTermsLookupFieldStringArrayMap(termsLookupFieldStringArrayMapMap) + .setValueType(ValueType.VALUE_TYPE_BITMAP) + .build(); + + // Call the method under test, should throw IllegalArgumentException + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> TermsQueryBuilderProtoUtils.fromProto(termsQueryField) + ); + + assertTrue( + "Exception message should mention invalid value for bitmap type", + exception.getMessage().contains("Invalid value for bitmap type") + ); + } +} diff --git a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java index cd133798faa6d..f3dbb3c7ad65c 100644 --- a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java @@ -235,7 +235,7 @@ public final int hashCode() { * @param obj the input object * @return the same input object or a {@link BytesRef} representation if input was of type string */ - static Object maybeConvertToBytesRef(Object obj) { + public static Object maybeConvertToBytesRef(Object obj) { if (obj instanceof String) { return BytesRefs.toBytesRef(obj); } else if (obj instanceof CharBuffer) { diff --git a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java index 3f9c912434dfc..774755dc97946 100644 --- a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java @@ -105,7 +105,7 @@ public enum ValueType { this.type = type; } - static ValueType fromString(String type) { + public static ValueType fromString(String type) { for (ValueType valueType : ValueType.values()) { if (valueType.type.equalsIgnoreCase(type)) { return valueType; @@ -274,6 
+274,10 @@ public String fieldName() { return this.fieldName; } + public ValueType valueType() { + return this.valueType; + } + public List values() { return convertBack(this.values); } From 5a0d5c5abc7df4417a89c5a336864d593f31bf12 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Fri, 11 Apr 2025 15:12:41 -0700 Subject: [PATCH 205/550] Custom Gradle plugin to leverage java agent (#17900) Signed-off-by: Prudhvi Godithi --- .../opensearch/gradle/agent/JavaAgent.java | 71 +++++++++++++++++++ .../opensearch.java-agent.properties | 9 +++ .../gradle/agent/JavaAgentTests.java | 66 +++++++++++++++++ 3 files changed, 146 insertions(+) create mode 100644 buildSrc/src/main/java/org/opensearch/gradle/agent/JavaAgent.java create mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.java-agent.properties create mode 100644 buildSrc/src/test/java/org/opensearch/gradle/agent/JavaAgentTests.java diff --git a/buildSrc/src/main/java/org/opensearch/gradle/agent/JavaAgent.java b/buildSrc/src/main/java/org/opensearch/gradle/agent/JavaAgent.java new file mode 100644 index 0000000000000..f78af9545fc15 --- /dev/null +++ b/buildSrc/src/main/java/org/opensearch/gradle/agent/JavaAgent.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gradle.agent; + +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.tasks.Copy; +import org.gradle.api.tasks.TaskProvider; +import org.gradle.api.tasks.testing.Test; + +import java.io.File; +import java.util.Objects; + +/** + * Gradle plugin to automatically configure the OpenSearch Java agent + * for test tasks in OpenSearch plugin projects. + */ +public class JavaAgent implements Plugin { + + /** + * Plugin implementation that sets up java agent configuration and applies it to test tasks. + */ + @Override + public void apply(Project project) { + Configuration agentConfiguration = project.getConfigurations().findByName("agent"); + if (agentConfiguration == null) { + agentConfiguration = project.getConfigurations().create("agent"); + } + + project.afterEvaluate(p -> { + String opensearchVersion = getOpensearchVersion(p); + p.getDependencies().add("agent", "org.opensearch:opensearch-agent-bootstrap:" + opensearchVersion); + p.getDependencies().add("agent", "org.opensearch:opensearch-agent:" + opensearchVersion); + }); + + Configuration finalAgentConfiguration = agentConfiguration; + TaskProvider prepareJavaAgent = project.getTasks().register("prepareJavaAgent", Copy.class, task -> { + task.from(finalAgentConfiguration); + task.into(new File(project.getBuildDir(), "agent")); + }); + + project.getTasks().withType(Test.class).configureEach(testTask -> { + testTask.dependsOn(prepareJavaAgent); + + final String opensearchVersion = getOpensearchVersion(project); + + testTask.doFirst(task -> { + File agentJar = new File(project.getBuildDir(), "agent/opensearch-agent-" + opensearchVersion + ".jar"); + + testTask.jvmArgs("-javaagent:" + agentJar.getAbsolutePath()); + }); + }); + } + + /** + * Gets the OpenSearch version from project properties, with a fallback default. 
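+ * The value is read from the {@code opensearch_version} project property via {@code Objects.requireNonNull},
+ * so the consuming build is expected to define it (typically in gradle.properties).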
+ * + * @param project The Gradle project + * @return The OpenSearch version to use + */ + private String getOpensearchVersion(Project project) { + return Objects.requireNonNull(project.property("opensearch_version")).toString(); + } +} diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.java-agent.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.java-agent.properties new file mode 100644 index 0000000000000..5eb7ea0a99c48 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.java-agent.properties @@ -0,0 +1,9 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# + +implementation-class=org.opensearch.gradle.agent.JavaAgent diff --git a/buildSrc/src/test/java/org/opensearch/gradle/agent/JavaAgentTests.java b/buildSrc/src/test/java/org/opensearch/gradle/agent/JavaAgentTests.java new file mode 100644 index 0000000000000..fcb3b6939c7c3 --- /dev/null +++ b/buildSrc/src/test/java/org/opensearch/gradle/agent/JavaAgentTests.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gradle.agent; + +import org.opensearch.gradle.test.GradleUnitTestCase; +import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.tasks.Copy; +import org.gradle.testfixtures.ProjectBuilder; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.IOException; + +public class JavaAgentTests extends GradleUnitTestCase { + private TemporaryFolder projectDir; + private final String PREPARE_JAVA_AGENT_TASK = "prepareJavaAgent"; + + @Before + public void setUp() throws IOException { + projectDir = new TemporaryFolder(); + projectDir.create(); + } + + @After + public void tearDown() { + projectDir.delete(); + } + + /** + * This test is used to verify that adding the 'opensearch.java-agent' to the project + * creates the necessary agent configuration and tasks. This is basically + * a behavioral test of the {@link JavaAgent#apply(Project)} method. 
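+ * It also checks that the {@code prepareJavaAgent} copy task resolves its destination to {@code build/agent}.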
+ */ + @Test + public void applyJavaAgentPlugin() { + // Create an empty project and apply the JavaAgent plugin + Project project = ProjectBuilder.builder().build(); + project.getPluginManager().apply(JavaAgent.class); + + // Verify the agent configuration was created + Configuration agentConfig = project.getConfigurations().findByName("agent"); + assertNotNull("Agent configuration should be created", agentConfig); + + // Verify the prepareJavaAgent task was created and is of the right type + assertNotNull("prepareJavaAgent task should be created", project.getTasks().findByName(PREPARE_JAVA_AGENT_TASK)); + assertTrue("prepareJavaAgent task should be of type Copy", project.getTasks().findByName(PREPARE_JAVA_AGENT_TASK) instanceof Copy); + + // Verify the destination directory of the Copy task + Copy prepareTask = (Copy) project.getTasks().findByName(PREPARE_JAVA_AGENT_TASK); + assertEquals( + "Destination directory should be build/agent", + new File(project.getBuildDir(), "agent"), + prepareTask.getDestinationDir() + ); + } +} From b842e48562d3ee1678dff62ad75a57924fd4a9a8 Mon Sep 17 00:00:00 2001 From: Mikhail Khludnev Date: Sat, 12 Apr 2025 01:25:16 +0300 Subject: [PATCH 206/550] Fix BytesRefsCollectionBuilderTests.testBuildSortedNotSorted Followup for #17714: Remove redundant tests (#17902) * Remove redundant tests Signed-off-by: Mikhail Khludnev * Fix empty collection test Signed-off-by: Mikhail Khludnev --------- Signed-off-by: Mikhail Khludnev --- .../BytesRefsCollectionBuilderTests.java | 6 +- .../index/mapper/KeywordFieldTypeTests.java | 62 ------------------- 2 files changed, 4 insertions(+), 64 deletions(-) diff --git a/server/src/test/java/org/opensearch/index/mapper/BytesRefsCollectionBuilderTests.java b/server/src/test/java/org/opensearch/index/mapper/BytesRefsCollectionBuilderTests.java index 83c4877a1949e..c3860a3f8a66f 100644 --- a/server/src/test/java/org/opensearch/index/mapper/BytesRefsCollectionBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/BytesRefsCollectionBuilderTests.java @@ -28,8 +28,10 @@ public void testBuildSortedNotSorted() { Collection sortedSet = assertCollectionBuilt(sortedBytesRefs); assertCollectionBuilt(bytesRefList); - assertTrue(sortedSet instanceof SortedSet); - assertNull(((SortedSet) sortedSet).comparator()); + assertTrue(sortedSet.isEmpty() || sortedSet instanceof SortedSet); + if (!sortedSet.isEmpty()) { + assertNull(((SortedSet) sortedSet).comparator()); + } } public void testBuildFooBar() { diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java index 4074cb0497e6c..02fd0d825c26f 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java @@ -33,8 +33,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.TokenFilter; @@ -79,18 +77,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Random; -import java.util.SortedSet; -import java.util.stream.Stream; - -import org.mockito.MockedConstruction; -import 
org.mockito.stubbing.Answer; - -import static org.mockito.Mockito.mockConstructionWithAnswer; public class KeywordFieldTypeTests extends FieldTypeTestCase { @@ -229,57 +218,6 @@ public void testTermsSortedQuery() { assertEquals(expectedDocValues, onlyDocValues.termsQuery(sortedStrings, null)); } - @AwaitsFix(bugUrl = "no commit") - public void testMockTermsSortedQuery() { - String[] seedStrings = generateRandomStringArray(10, 10, false, false); - if (seedStrings.length == 1) { - seedStrings = Stream.concat(Arrays.stream(seedStrings), Arrays.stream(generateRandomStringArray(10, 10, false, false))) - .toArray(String[]::new); - } - List bytesRefList = Arrays.stream(seedStrings).map(BytesRef::new).toList(); - List sortedStrings = bytesRefList.stream().sorted().map(BytesRef::utf8ToString).toList(); - Answer asseretSortedSetArg = invocationOnMock -> { - Object[] args = invocationOnMock.getArguments(); - for (int i = 0; i < args.length; i++) { - if (args[i] instanceof Collection) { - assertTrue(args[i] instanceof SortedSet); - return invocationOnMock.callRealMethod(); - } - } - fail(); - return null; - }; - try (MockedConstruction ignored = mockConstructionWithAnswer(TermInSetQuery.class, asseretSortedSetArg)) { - MappedFieldType ft = new KeywordFieldType("field"); - assertNotNull(ft.termsQuery(sortedStrings, MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); - MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); - assertNotNull(onlyIndexed.termsQuery(sortedStrings, null)); - MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); - assertNotNull(onlyDocValues.termsQuery(sortedStrings, null)); - } - } - - @AwaitsFix(bugUrl = "no commit") - public void testHeavyWeight() { - int arraySize = 10000000; - BytesRef[] array = new BytesRef[arraySize]; - Random random = random(); - for (int i = 0; i < arraySize; i++) { - String str = RandomStrings.randomAsciiOfLength(random, 10); - array[i] = new BytesRef(str); - } - BytesRefsCollectionBuilder outofOrder = new BytesRefsCollectionBuilder(arraySize); - BytesRefsCollectionBuilder inOrder = new BytesRefsCollectionBuilder(arraySize); - Arrays.stream(array).forEach(outofOrder); - Arrays.stream(array).sorted().forEachOrdered(inOrder); - Logger logger = LogManager.getLogger(KeywordFieldTypeTests.class); - long start = System.currentTimeMillis(), intermid; - new TermInSetQuery("foo", outofOrder.get()); - logger.info("out of order {} ms", (intermid = System.currentTimeMillis()) - start); - new TermInSetQuery("foo", inOrder.get()); - logger.info("in order{} ms", System.currentTimeMillis() - intermid); - } - public void testExistsQuery() { { KeywordFieldType ft = new KeywordFieldType("field"); From 5938bc8b55f759d82f3dfd3a2623d0fa3acc5aaa Mon Sep 17 00:00:00 2001 From: Harsha Vamsi Kalluri Date: Fri, 11 Apr 2025 15:55:49 -0700 Subject: [PATCH 207/550] Approximate match all query with a sort (#17772) * Adding approximate match all query Signed-off-by: Harsha Vamsi Kalluri * Minor refactor Signed-off-by: Harsha Vamsi Kalluri * Fix doc field sorting Signed-off-by: Harsha Vamsi Kalluri * Fix tests Signed-off-by: Harsha Vamsi Kalluri * Fix more tests Signed-off-by: Harsha Vamsi Kalluri * Fix more tests Signed-off-by: Harsha Vamsi Kalluri * Fix backward tests Signed-off-by: Harsha Vamsi Kalluri --------- Signed-off-by: Harsha Vamsi Kalluri Signed-off-by: Michael Froh Co-authored-by: Michael Froh --- CHANGELOG.md | 1 + .../test/indices.validate_query/10_basic.yml | 6 +- 
.../opensearch/search/sort/FieldSortIT.java | 3 +- .../validate/SimpleValidateQueryIT.java | 5 +- .../index/query/MatchAllQueryBuilder.java | 4 +- .../approximate/ApproximateMatchAllQuery.java | 95 +++++++++++++++ .../approximate/ApproximateScoreQuery.java | 3 - .../search/sort/FieldSortBuilder.java | 1 + .../index/query/BoolQueryBuilderTests.java | 7 +- .../query/MatchAllQueryBuilderTests.java | 7 +- .../index/query/NestedQueryBuilderTests.java | 7 +- .../index/query/WrapperQueryBuilderTests.java | 9 +- .../FunctionScoreQueryBuilderTests.java | 7 +- .../index/search/NestedHelperTests.java | 8 +- .../ApproximateMatchAllQueryTests.java | 108 ++++++++++++++++++ .../sort/GeoDistanceSortBuilderTests.java | 13 ++- .../search/sort/ScriptSortBuilderTests.java | 13 ++- 17 files changed, 277 insertions(+), 20 deletions(-) create mode 100644 server/src/main/java/org/opensearch/search/approximate/ApproximateMatchAllQuery.java create mode 100644 server/src/test/java/org/opensearch/search/approximate/ApproximateMatchAllQueryTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e30e070c9d58..e3bc526dea181 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add update and delete support in pull-based ingestion ([#17822](https://github.com/opensearch-project/OpenSearch/pull/17822)) - Allow maxPollSize and pollTimeout in IngestionSource to be configurable ([#17863](https://github.com/opensearch-project/OpenSearch/pull/17863)) - [Star Tree] [Search] Add query changes to support unsigned-long in star tree ([#17275](https://github.com/opensearch-project/OpenSearch/pull/17275)) +- Add `ApproximateMatchAllQuery` that targets match_all queries and approximates sorts ([#17772](https://github.com/opensearch-project/OpenSearch/pull/17772)) - Add TermsQuery support to Search GRPC endpoint ([#17888](https://github.com/opensearch-project/OpenSearch/pull/17888)) ### Changed diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yml index d3d78c3c60a00..a7f6c2adea921 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yml @@ -13,8 +13,8 @@ setup: --- "Validate query api": - skip: - version: ' - 7.6.99' - reason: message changed in 7.7.0 + version: ' - 2.99.99' + reason: message changed in 3.0.0 - do: indices.validate_query: @@ -66,7 +66,7 @@ setup: - is_true: valid - match: {_shards.failed: 0} - match: {explanations.0.index: 'testing'} - - match: {explanations.0.explanation: '*:*'} + - match: {explanations.0.explanation: 'ApproximateScoreQuery(originalQuery=*:*, approximationQuery=Approximate(*:*))'} --- "Validate body without query element": diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index 2017557852865..53692292e4169 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -1145,11 +1145,10 @@ public void testSortMissingNumbersMinMax() throws Exception { .get(); assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(3L)); + 
assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(2L)); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); // The order here could be unstable (depends on document order) since missing == field value assertThat(searchResponse.getHits().getAt(1).getId(), is(oneOf("3", "2"))); - assertThat(searchResponse.getHits().getAt(2).getId(), is(oneOf("2", "3"))); logger.info("--> sort with missing _last"); searchResponse = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java index 22647c59d42cf..735d201a94f2a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java @@ -294,7 +294,10 @@ public void testExplainNoQuery() { assertThat(validateQueryResponse.isValid(), equalTo(true)); assertThat(validateQueryResponse.getQueryExplanation().size(), equalTo(1)); assertThat(validateQueryResponse.getQueryExplanation().get(0).getIndex(), equalTo("test")); - assertThat(validateQueryResponse.getQueryExplanation().get(0).getExplanation(), equalTo("*:*")); + assertThat( + validateQueryResponse.getQueryExplanation().get(0).getExplanation(), + equalTo("ApproximateScoreQuery(originalQuery=*:*, approximationQuery=Approximate(*:*))") + ); } public void testExplainFilteredAlias() { diff --git a/server/src/main/java/org/opensearch/index/query/MatchAllQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchAllQueryBuilder.java index c62ee0ac39584..d7bc63db4fecb 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchAllQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchAllQueryBuilder.java @@ -40,6 +40,8 @@ import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.search.approximate.ApproximateMatchAllQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import java.io.IOException; @@ -88,7 +90,7 @@ public static MatchAllQueryBuilder fromXContent(XContentParser parser) { @Override protected Query doToQuery(QueryShardContext context) { - return Queries.newMatchAllQuery(); + return new ApproximateScoreQuery(Queries.newMatchAllQuery(), new ApproximateMatchAllQuery()); } @Override diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximateMatchAllQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximateMatchAllQuery.java new file mode 100644 index 0000000000000..55d011d578b66 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/approximate/ApproximateMatchAllQuery.java @@ -0,0 +1,95 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.approximate; + +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.sort.FieldSortBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Replaces match-all query with a less expensive query if possible. + *
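+ * The typical target is a {@code match_all} request combined with a primary field sort, for example on a
+ * hypothetical {@code timestamp} field.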
+ * Currently, will rewrite to a bounded range query over the high/low end of a field if a primary sort is specified + * on that field. + */ +public class ApproximateMatchAllQuery extends ApproximateQuery { + private ApproximateQuery approximation = null; + + @Override + protected boolean canApproximate(SearchContext context) { + approximation = null; + if (context == null) { + return false; + } + if (context.aggregations() != null) { + return false; + } + + if (context.request() != null && context.request().source() != null && context.innerHits().getInnerHits().isEmpty()) { + FieldSortBuilder primarySortField = FieldSortBuilder.getPrimaryFieldSortOrNull(context.request().source()); + if (primarySortField != null + && primarySortField.missing() == null + && !primarySortField.fieldName().equals(FieldSortBuilder.DOC_FIELD_NAME) + && !primarySortField.fieldName().equals(FieldSortBuilder.ID_FIELD_NAME)) { + MappedFieldType mappedFieldType = context.getQueryShardContext().fieldMapper(primarySortField.fieldName()); + if (mappedFieldType == null) { + return false; + } + Query rangeQuery = mappedFieldType.rangeQuery(null, null, false, false, null, null, null, context.getQueryShardContext()); + if (rangeQuery instanceof ApproximateScoreQuery approximateScoreQuery) { + approximateScoreQuery.setContext(context); + if (approximateScoreQuery.resolvedQuery instanceof ApproximateQuery) { + approximation = (ApproximateQuery) approximateScoreQuery.resolvedQuery; + return true; + } + } + } + } + return false; + } + + @Override + public String toString(String field) { + return "Approximate(*:*)"; + } + + @Override + public void visit(QueryVisitor visitor) { + visitor.visitLeaf(this); + + } + + @Override + public boolean equals(Object o) { + if (sameClassAs(o)) { + ApproximateMatchAllQuery other = (ApproximateMatchAllQuery) o; + return Objects.equals(approximation, other.approximation); + } + return false; + } + + @Override + public int hashCode() { + return classHash(); + } + + @Override + public Query rewrite(IndexSearcher indexSearcher) throws IOException { + if (approximation == null) { + throw new IllegalStateException("rewrite called without setting context or query could not be approximated"); + } + return approximation.rewrite(indexSearcher); + } +} diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java index 6b39606620716..cf9ead3f088c2 100644 --- a/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java +++ b/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java @@ -51,9 +51,6 @@ public Query rewrite(IndexSearcher indexSearcher) throws IOException { } public void setContext(SearchContext context) { - if (resolvedQuery != null) { - throw new IllegalStateException("Query already resolved, duplicate call to setContext"); - } resolvedQuery = approximationQuery.canApproximate(context) ? 
approximationQuery : originalQuery; }; diff --git a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java index 9825b2cbbe08e..29de7fa66b78e 100644 --- a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java @@ -100,6 +100,7 @@ public class FieldSortBuilder extends SortBuilder implements W * special field name to sort by index order */ public static final String DOC_FIELD_NAME = "_doc"; + public static final String ID_FIELD_NAME = "_id"; private static final SortFieldAndFormat SORT_DOC = new SortFieldAndFormat(new SortField(null, SortField.Type.DOC), DocValueFormat.RAW); private static final SortFieldAndFormat SORT_DOC_REVERSE = new SortFieldAndFormat( new SortField(null, SortField.Type.DOC, true), diff --git a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java index f3de666c52932..9223d82339252 100644 --- a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java @@ -42,6 +42,8 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.search.approximate.ApproximateMatchAllQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import org.hamcrest.Matchers; @@ -208,7 +210,10 @@ public void testMinShouldMatchFilterWithoutShouldClauses() throws Exception { assertThat(innerBooleanQuery.clauses().size(), equalTo(1)); BooleanClause innerBooleanClause = innerBooleanQuery.clauses().get(0); assertThat(innerBooleanClause.occur(), equalTo(BooleanClause.Occur.MUST)); - assertThat(innerBooleanClause.query(), instanceOf(MatchAllDocsQuery.class)); + assertThat(innerBooleanClause.query(), instanceOf(ApproximateScoreQuery.class)); + ApproximateScoreQuery approxQuery = (ApproximateScoreQuery) innerBooleanClause.query(); + assertThat(approxQuery.getOriginalQuery(), instanceOf(MatchAllDocsQuery.class)); + assertThat(approxQuery.getApproximationQuery(), instanceOf(ApproximateMatchAllQuery.class)); } public void testMinShouldMatchBiggerThanNumberOfShouldClauses() throws Exception { diff --git a/server/src/test/java/org/opensearch/index/query/MatchAllQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MatchAllQueryBuilderTests.java index 7e51567ac0f84..458c907766f78 100644 --- a/server/src/test/java/org/opensearch/index/query/MatchAllQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MatchAllQueryBuilderTests.java @@ -34,6 +34,8 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.opensearch.search.approximate.ApproximateMatchAllQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -49,7 +51,10 @@ protected MatchAllQueryBuilder doCreateTestQueryBuilder() { @Override protected void doAssertLuceneQuery(MatchAllQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { - assertThat(query, instanceOf(MatchAllDocsQuery.class)); + assertThat(query, instanceOf(ApproximateScoreQuery.class)); + ApproximateScoreQuery approxQuery = 
(ApproximateScoreQuery) query; + assertThat(approxQuery.getOriginalQuery(), instanceOf(MatchAllDocsQuery.class)); + assertThat(approxQuery.getApproximationQuery(), instanceOf(ApproximateMatchAllQuery.class)); } public void testFromJson() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index c371f004295d6..435e529325646 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -50,6 +50,8 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery; +import org.opensearch.search.approximate.ApproximateMatchAllQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.search.fetch.subphase.InnerHitsContext; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.sort.FieldSortBuilder; @@ -493,7 +495,10 @@ public void testNestedDepthAllowed() throws Exception { .filter(c -> c.occur() == BooleanClause.Occur.MUST) .findFirst(); assertTrue(childLeg.isPresent()); - assertEquals(new MatchAllDocsQuery(), childLeg.get().query()); + assertThat(childLeg.get().query(), instanceOf(ApproximateScoreQuery.class)); + ApproximateScoreQuery approxQuery = (ApproximateScoreQuery) childLeg.get().query(); + assertThat(approxQuery.getOriginalQuery(), instanceOf(MatchAllDocsQuery.class)); + assertThat(approxQuery.getApproximationQuery(), instanceOf(ApproximateMatchAllQuery.class)); }; check.accept(createShardContext()); doWithDepth(randomIntBetween(1, 20), check); diff --git a/server/src/test/java/org/opensearch/index/query/WrapperQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/WrapperQueryBuilderTests.java index 4135f6e0ef049..1786517c1aa1d 100644 --- a/server/src/test/java/org/opensearch/index/query/WrapperQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/WrapperQueryBuilderTests.java @@ -41,12 +41,16 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.search.approximate.ApproximateMatchAllQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; import java.io.UncheckedIOException; import java.io.UnsupportedEncodingException; +import static org.hamcrest.CoreMatchers.instanceOf; + public class WrapperQueryBuilderTests extends AbstractQueryTestCase { @Override @@ -171,7 +175,10 @@ public void testRewriteInnerQueryToo() throws IOException { assertEquals(new TermQuery(new Term(TEXT_FIELD_NAME, "bar")), qb.rewrite(shardContext).toQuery(shardContext)); qb = new WrapperQueryBuilder(new BoolQueryBuilder().toString()); - assertEquals(new MatchAllDocsQuery(), qb.rewrite(shardContext).toQuery(shardContext)); + assertThat(qb.rewrite(shardContext).toQuery(shardContext), instanceOf(ApproximateScoreQuery.class)); + ApproximateScoreQuery approxQuery = (ApproximateScoreQuery) qb.rewrite(shardContext).toQuery(shardContext); + assertThat(approxQuery.getOriginalQuery(), instanceOf(MatchAllDocsQuery.class)); + assertThat(approxQuery.getApproximationQuery(), instanceOf(ApproximateMatchAllQuery.class)); } 
@Override diff --git a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index 8cf7941941bcb..c762fdcde146d 100644 --- a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -67,6 +67,8 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.MultiValueMode; +import org.opensearch.search.approximate.ApproximateMatchAllQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; import org.joda.time.DateTime; @@ -630,7 +632,10 @@ public void testCustomWeightFactorQueryBuilderWithFunctionScoreWithoutQueryGiven Query parsedQuery = parseQuery(functionScoreQuery(weightFactorFunction(1.3f))).toQuery(createShardContext()); assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class)); FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery; - assertThat(functionScoreQuery.getSubQuery() instanceof MatchAllDocsQuery, equalTo(true)); + assertThat(functionScoreQuery.getSubQuery(), CoreMatchers.instanceOf(ApproximateScoreQuery.class)); + ApproximateScoreQuery approxQuery = (ApproximateScoreQuery) functionScoreQuery.getSubQuery(); + assertThat(approxQuery.getOriginalQuery(), CoreMatchers.instanceOf(MatchAllDocsQuery.class)); + assertThat(approxQuery.getApproximationQuery(), CoreMatchers.instanceOf(ApproximateMatchAllQuery.class)); assertThat((double) (functionScoreQuery.getFunctions()[0]).getWeight(), closeTo(1.3, 0.001)); } diff --git a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java index f7f921e824490..173cbf6b540a6 100644 --- a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java +++ b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java @@ -42,6 +42,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.ScoreMode; +import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; @@ -52,6 +53,8 @@ import org.opensearch.index.query.NestedQueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.approximate.ApproximateMatchAllQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; @@ -325,7 +328,10 @@ public void testNested() throws IOException { NestedQueryBuilder queryBuilder = new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.Avg); OpenSearchToParentBlockJoinQuery query = (OpenSearchToParentBlockJoinQuery) queryBuilder.toQuery(context); - Query expectedChildQuery = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), Occur.MUST) + Query expectedChildQuery = new BooleanQuery.Builder().add( + new ApproximateScoreQuery(Queries.newMatchAllQuery(), new ApproximateMatchAllQuery()), + Occur.MUST + ) // we automatically add a filter since the inner query might match 
non-nested docs .add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested1")), Occur.FILTER) .build(); diff --git a/server/src/test/java/org/opensearch/search/approximate/ApproximateMatchAllQueryTests.java b/server/src/test/java/org/opensearch/search/approximate/ApproximateMatchAllQueryTests.java new file mode 100644 index 0000000000000..db904751ed76f --- /dev/null +++ b/server/src/test/java/org/opensearch/search/approximate/ApproximateMatchAllQueryTests.java @@ -0,0 +1,108 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.approximate; + +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.BigArrays; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.SearchContextAggregations; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.search.sort.FieldSortBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.TestSearchContext; + +import java.io.IOException; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ApproximateMatchAllQueryTests extends OpenSearchTestCase { + + public void testCanApproximate() throws IOException { + ApproximateMatchAllQuery approximateMatchAllQuery = new ApproximateMatchAllQuery(); + // Fail on null searchContext + assertFalse(approximateMatchAllQuery.canApproximate(null)); + + ShardSearchRequest[] shardSearchRequest = new ShardSearchRequest[1]; + + MapperService mockMapper = mock(MapperService.class); + String sortfield = "myfield"; + MappedFieldType myFieldType = new NumberFieldMapper.NumberFieldType(sortfield, NumberFieldMapper.NumberType.LONG); + when(mockMapper.fieldType(sortfield)).thenReturn(myFieldType); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + IndexMetadata indexMetadata = new IndexMetadata.Builder("index").settings(settings).build(); + QueryShardContext queryShardContext = new QueryShardContext( + 0, + new IndexSettings(indexMetadata, settings), + BigArrays.NON_RECYCLING_INSTANCE, + null, + null, + mockMapper, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + TestSearchContext searchContext = new TestSearchContext(queryShardContext) { + @Override + public ShardSearchRequest request() { + return shardSearchRequest[0]; + } + }; + + // Fail if aggregations are present + searchContext.aggregations(new SearchContextAggregations(new AggregatorFactories.Builder().build(null, null), null)); + assertFalse(approximateMatchAllQuery.canApproximate(searchContext)); + searchContext.aggregations(null); + + // Fail on missing ShardSearchRequest + assertFalse(approximateMatchAllQuery.canApproximate(searchContext)); + + // 
Fail if source is null or empty + shardSearchRequest[0] = new ShardSearchRequest(null, System.currentTimeMillis(), null); + assertFalse(approximateMatchAllQuery.canApproximate(searchContext)); + + // Fail if source does not have a sort. + SearchSourceBuilder source = new SearchSourceBuilder(); + shardSearchRequest[0].source(source); + assertFalse(approximateMatchAllQuery.canApproximate(searchContext)); + + source.sort(sortfield, SortOrder.ASC); + assertTrue(approximateMatchAllQuery.canApproximate(searchContext)); + assertTrue(approximateMatchAllQuery.rewrite(null) instanceof ApproximatePointRangeQuery); + + // But not if the sort field makes a decision about missing data + source.sorts().clear(); + source.sort(new FieldSortBuilder(sortfield).missing("foo")); + assertFalse(approximateMatchAllQuery.canApproximate(searchContext)); + assertThrows(IllegalStateException.class, () -> approximateMatchAllQuery.rewrite(null)); + } + +} diff --git a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java index 385ced3655116..bca1ba13d11c3 100644 --- a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java @@ -59,7 +59,10 @@ import org.opensearch.index.query.RangeQueryBuilder; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; +import org.opensearch.search.approximate.ApproximateMatchAllQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.geo.RandomGeoGenerator; +import org.hamcrest.CoreMatchers; import java.io.IOException; import java.util.ArrayList; @@ -544,7 +547,10 @@ public void testBuildNested() throws IOException { XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); Nested nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery()); + assertThat(nested.getInnerQuery(), CoreMatchers.instanceOf(ApproximateScoreQuery.class)); + ApproximateScoreQuery approxQuery = (ApproximateScoreQuery) nested.getInnerQuery(); + assertThat(approxQuery.getOriginalQuery(), CoreMatchers.instanceOf(MatchAllDocsQuery.class)); + assertThat(approxQuery.getApproximationQuery(), CoreMatchers.instanceOf(ApproximateMatchAllQuery.class)); sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedPath("path"); sortField = sortBuilder.build(shardContextMock).field; @@ -561,7 +567,10 @@ public void testBuildNested() throws IOException { comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery()); + assertThat(nested.getInnerQuery(), CoreMatchers.instanceOf(ApproximateScoreQuery.class)); + approxQuery = (ApproximateScoreQuery) nested.getInnerQuery(); + assertThat(approxQuery.getOriginalQuery(), CoreMatchers.instanceOf(MatchAllDocsQuery.class)); + assertThat(approxQuery.getApproximationQuery(), CoreMatchers.instanceOf(ApproximateMatchAllQuery.class)); // if nested path is missing, we omit any filter and return a regular SortField // (LatLonSortField) diff --git a/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java index a124fdfeeb508..1660f8d73c323 100644 --- 
a/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java @@ -54,7 +54,10 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; +import org.opensearch.search.approximate.ApproximateMatchAllQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.search.sort.ScriptSortBuilder.ScriptSortType; +import org.hamcrest.CoreMatchers; import java.io.IOException; import java.util.Collections; @@ -336,7 +339,10 @@ public void testBuildNested() throws IOException { XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); Nested nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery()); + assertThat(nested.getInnerQuery(), CoreMatchers.instanceOf(ApproximateScoreQuery.class)); + ApproximateScoreQuery approxQuery = (ApproximateScoreQuery) nested.getInnerQuery(); + assertThat(approxQuery.getOriginalQuery(), CoreMatchers.instanceOf(MatchAllDocsQuery.class)); + assertThat(approxQuery.getApproximationQuery(), CoreMatchers.instanceOf(ApproximateMatchAllQuery.class)); sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER).setNestedPath("path"); sortField = sortBuilder.build(shardContextMock).field; @@ -353,7 +359,10 @@ public void testBuildNested() throws IOException { comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery()); + assertThat(nested.getInnerQuery(), CoreMatchers.instanceOf(ApproximateScoreQuery.class)); + approxQuery = (ApproximateScoreQuery) nested.getInnerQuery(); + assertThat(approxQuery.getOriginalQuery(), CoreMatchers.instanceOf(MatchAllDocsQuery.class)); + assertThat(approxQuery.getApproximationQuery(), CoreMatchers.instanceOf(ApproximateMatchAllQuery.class)); // if nested path is missing, we omit nested element in the comparator sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER).setNestedFilter( From 6ff44d9b5e1f1f6f22b9935d21e408957383e940 Mon Sep 17 00:00:00 2001 From: Finn Date: Fri, 11 Apr 2025 15:57:13 -0700 Subject: [PATCH 208/550] Remove ide.gradle debug artifact. 
(#17908) Signed-off-by: Finn Carroll --- gradle/ide.gradle | 4 ---- 1 file changed, 4 deletions(-) diff --git a/gradle/ide.gradle b/gradle/ide.gradle index 343ac03b8ed20..50b0ec7e7ad14 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -49,10 +49,6 @@ if (System.getProperty('idea.active') == 'true') { } } - buildScan { - server = 'https://127.0.0.1' - } - idea { project { vcs = 'Git' From 5fb4e6951b6c80db5c9c45398ed3704ac4092ba3 Mon Sep 17 00:00:00 2001 From: bowenlan-amzn Date: Fri, 11 Apr 2025 17:52:51 -0700 Subject: [PATCH 209/550] Support sub agg in filter rewrite optimization (#17447) * Support sub agg in filter rewrite optimization Signed-off-by: bowenlan-amzn * Clean unused code Signed-off-by: bowenlan-amzn * remove singleton DV related change Signed-off-by: bowenlan-amzn * let aggregator decide whether to support sub agg Signed-off-by: bowenlan-amzn * refactor range collector Signed-off-by: bowenlan-amzn * prevent NPE Signed-off-by: bowenlan-amzn * handle tryPrecomputeAggregationForLeaf interface Signed-off-by: bowenlan-amzn * clean up for review Signed-off-by: bowenlan-amzn * add changelog Signed-off-by: bowenlan-amzn * add segment level check this is for the regression we see in pmc, docs per segment doesn't work well. we should also consider number of ranges. From experiments I choose 1000. Signed-off-by: bowenlan-amzn * improvements - throw exception for unreachable path - several place to only run when hasSub - range agg only works for match all Signed-off-by: bowenlan-amzn * experiment annotation Signed-off-by: bowenlan-amzn * Update server/src/main/java/org/opensearch/search/SearchService.java Signed-off-by: bowenlan-amzn * Collect sub agg after each bucket Signed-off-by: bowenlan-amzn * try fixed bit set Signed-off-by: bowenlan-amzn * address comments Signed-off-by: bowenlan-amzn --------- Signed-off-by: bowenlan-amzn --- CHANGELOG.md | 1 + .../common/settings/ClusterSettings.java | 1 + .../search/DefaultSearchContext.java | 15 + .../org/opensearch/search/SearchService.java | 12 + .../bucket/composite/CompositeAggregator.java | 12 +- .../filterrewrite/AggregatorBridge.java | 52 +- .../DateHistogramAggregatorBridge.java | 18 +- .../FilterRewriteOptimizationContext.java | 62 ++- .../filterrewrite/PointTreeTraversal.java | 174 +++---- .../filterrewrite/RangeAggregatorBridge.java | 15 +- .../bucket/filterrewrite/Ranges.java | 14 +- .../AbstractRangeCollector.java | 82 ++++ .../rangecollector/RangeCollector.java | 88 ++++ .../rangecollector/SimpleRangeCollector.java | 65 +++ .../rangecollector/SubAggRangeCollector.java | 99 ++++ .../rangecollector/package-info.java | 12 + .../AutoDateHistogramAggregator.java | 16 +- .../histogram/DateHistogramAggregator.java | 8 +- .../bucket/range/RangeAggregator.java | 8 +- .../search/internal/SearchContext.java | 6 + .../FilterRewriteSubAggTests.java | 452 ++++++++++++++++++ 21 files changed, 1062 insertions(+), 150 deletions(-) create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/AbstractRangeCollector.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/RangeCollector.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/SimpleRangeCollector.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/SubAggRangeCollector.java create mode 100644 
server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/package-info.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/bucket/filterrewrite/FilterRewriteSubAggTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index e3bc526dea181..7a0f6ab80da3b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Star Tree] [Search] Add query changes to support unsigned-long in star tree ([#17275](https://github.com/opensearch-project/OpenSearch/pull/17275)) - Add `ApproximateMatchAllQuery` that targets match_all queries and approximates sorts ([#17772](https://github.com/opensearch-project/OpenSearch/pull/17772)) - Add TermsQuery support to Search GRPC endpoint ([#17888](https://github.com/opensearch-project/OpenSearch/pull/17888)) +- Support sub agg in filter rewrite optimization ([#17447](https://github.com/opensearch-project/OpenSearch/pull/17447) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 5b998165c58a7..d0722b4f3a942 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -552,6 +552,7 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.MAX_OPEN_PIT_CONTEXT, SearchService.MAX_PIT_KEEPALIVE_SETTING, SearchService.MAX_AGGREGATION_REWRITE_FILTERS, + SearchService.AGGREGATION_REWRITE_FILTER_SEGMENT_THRESHOLD, SearchService.INDICES_MAX_CLAUSE_COUNT_SETTING, SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD, SearchService.KEYWORD_INDEX_OR_DOC_VALUES_ENABLED, diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 74a7482d975df..854a8dee56e6d 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -115,6 +115,7 @@ import java.util.function.Function; import java.util.function.LongSupplier; +import static org.opensearch.search.SearchService.AGGREGATION_REWRITE_FILTER_SEGMENT_THRESHOLD; import static org.opensearch.search.SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; @@ -207,6 +208,7 @@ final class DefaultSearchContext extends SearchContext { private final String concurrentSearchMode; private final SetOnce requestShouldUseConcurrentSearch = new SetOnce<>(); private final int maxAggRewriteFilters; + private final int filterRewriteSegmentThreshold; private final int cardinalityAggregationPruningThreshold; private final boolean keywordIndexOrDocValuesEnabled; @@ -267,6 +269,7 @@ final class DefaultSearchContext extends SearchContext { this.requestToAggReduceContextBuilder = requestToAggReduceContextBuilder; this.maxAggRewriteFilters = evaluateFilterRewriteSetting(); + this.filterRewriteSegmentThreshold = evaluateAggRewriteFilterSegThreshold(); this.cardinalityAggregationPruningThreshold = evaluateCardinalityAggregationPruningThreshold(); 
this.concurrentSearchDeciderFactories = concurrentSearchDeciderFactories; this.keywordIndexOrDocValuesEnabled = evaluateKeywordIndexOrDocValuesEnabled(); @@ -1124,6 +1127,18 @@ private int evaluateFilterRewriteSetting() { return 0; } + @Override + public int filterRewriteSegmentThreshold() { + return filterRewriteSegmentThreshold; + } + + private int evaluateAggRewriteFilterSegThreshold() { + if (clusterService != null) { + return clusterService.getClusterSettings().get(AGGREGATION_REWRITE_FILTER_SEGMENT_THRESHOLD); + } + return 0; + } + @Override public int cardinalityAggregationPruningThreshold() { return cardinalityAggregationPruningThreshold; diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 866cf0d62b033..1fa0fca68fca1 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -53,6 +53,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; @@ -309,6 +310,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv ); // value 0 means rewrite filters optimization in aggregations will be disabled + @ExperimentalApi public static final Setting MAX_AGGREGATION_REWRITE_FILTERS = Setting.intSetting( "search.max_aggregation_rewrite_filters", 3000, @@ -317,6 +319,16 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.NodeScope ); + // only do optimization when there's enough docs per range at segment level and sub agg exists + @ExperimentalApi + public static final Setting AGGREGATION_REWRITE_FILTER_SEGMENT_THRESHOLD = Setting.intSetting( + "search.aggregation_rewrite_filters.segment_threshold.docs_per_bucket", + 1000, + 0, + Property.Dynamic, + Property.NodeScope + ); + public static final Setting INDICES_MAX_CLAUSE_COUNT_SETTING = Setting.intSetting( "indices.query.bool.max_clause_count", 1024, diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java index fcf2a40dada14..471baf52b9303 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -94,7 +94,7 @@ import java.util.stream.Collectors; import static org.opensearch.search.aggregations.MultiBucketConsumerService.MAX_BUCKET_SETTING; -import static org.opensearch.search.aggregations.bucket.filterrewrite.DateHistogramAggregatorBridge.segmentMatchAll; +import static org.opensearch.search.aggregations.bucket.filterrewrite.AggregatorBridge.segmentMatchAll; /** * Main aggregator that aggregates docs from multiple aggregations @@ -173,6 +173,9 @@ public final class CompositeAggregator extends BucketsAggregator { @Override protected boolean canOptimize() { + if (subAggregators.length > 0) { + return false; + } if (canOptimize(sourceConfigs)) { this.valuesSource = (RoundingValuesSource) sourceConfigs[0].valuesSource(); if (rawAfterKey != null) { @@ -566,7 +569,12 @@ private void 
processLeafFromQuery(LeafReaderContext ctx, Sort indexSortPrefix) t @Override protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { finishLeaf(); // May need to wrap up previous leaf if it could not be precomputed - return filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, segmentMatchAll(context, ctx)); + return filterRewriteOptimizationContext.tryOptimize( + ctx, + this::incrementBucketDocCount, + segmentMatchAll(context, ctx), + collectableSubAggregators + ); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/AggregatorBridge.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/AggregatorBridge.java index 145a60373b4f3..2df191d92266e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/AggregatorBridge.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/AggregatorBridge.java @@ -8,16 +8,23 @@ package org.opensearch.search.aggregations.bucket.filterrewrite; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PointValues; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.search.aggregations.bucket.filterrewrite.rangecollector.RangeCollector; import org.opensearch.search.internal.SearchContext; import java.io.IOException; import java.util.function.BiConsumer; import java.util.function.Consumer; +import java.util.function.Function; + +import static org.opensearch.search.aggregations.bucket.filterrewrite.PointTreeTraversal.createCollector; +import static org.opensearch.search.aggregations.bucket.filterrewrite.PointTreeTraversal.multiRangesTraverse; /** * This interface provides a bridge between an aggregator and the optimization context, allowing @@ -35,6 +42,8 @@ */ public abstract class AggregatorBridge { + static final Logger logger = LogManager.getLogger(Helper.loggerName); + /** * The field type associated with this aggregator bridge. */ @@ -75,16 +84,51 @@ void setRangesConsumer(Consumer setRanges) { /** * Attempts to build aggregation results for a segment * - * @param values the point values (index structure for numeric values) for a segment - * @param incrementDocCount a consumer to increment the document count for a range bucket. The First parameter is document count, the second is the key of the bucket + * @param values the point values (index structure for numeric values) for a segment + * @param incrementDocCount a consumer to increment the document count for a range bucket. 
The First parameter is document count, the second is the key of the bucket * @param ranges + * @param subAggCollectorParam */ - abstract FilterRewriteOptimizationContext.DebugInfo tryOptimize( + abstract FilterRewriteOptimizationContext.OptimizeResult tryOptimize( PointValues values, BiConsumer incrementDocCount, - Ranges ranges + Ranges ranges, + FilterRewriteOptimizationContext.SubAggCollectorParam subAggCollectorParam ) throws IOException; + static FilterRewriteOptimizationContext.OptimizeResult getResult( + PointValues values, + BiConsumer incrementDocCount, + Ranges ranges, + Function getBucketOrd, + int size, + FilterRewriteOptimizationContext.SubAggCollectorParam subAggCollectorParam + ) throws IOException { + BiConsumer incrementFunc = (activeIndex, docCount) -> { + long bucketOrd = getBucketOrd.apply(activeIndex); + incrementDocCount.accept(bucketOrd, (long) docCount); + }; + + PointValues.PointTree tree = values.getPointTree(); + FilterRewriteOptimizationContext.OptimizeResult optimizeResult = new FilterRewriteOptimizationContext.OptimizeResult(); + int activeIndex = ranges.firstRangeIndex(tree.getMinPackedValue(), tree.getMaxPackedValue()); + if (activeIndex < 0) { + logger.debug("No ranges match the query, skip the fast filter optimization"); + return optimizeResult; + } + RangeCollector collector = createCollector( + ranges, + incrementFunc, + size, + activeIndex, + getBucketOrd, + optimizeResult, + subAggCollectorParam + ); + + return multiRangesTraverse(tree, collector); + } + /** * Checks whether the top level query matches all documents on the segment * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/DateHistogramAggregatorBridge.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/DateHistogramAggregatorBridge.java index 50fe6a8cbf69f..80c020b03bf40 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/DateHistogramAggregatorBridge.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/DateHistogramAggregatorBridge.java @@ -23,8 +23,6 @@ import java.util.function.BiConsumer; import java.util.function.Function; -import static org.opensearch.search.aggregations.bucket.filterrewrite.PointTreeTraversal.multiRangesTraverse; - /** * For date histogram aggregation */ @@ -127,27 +125,31 @@ private DateFieldMapper.DateFieldType getFieldType() { return (DateFieldMapper.DateFieldType) fieldType; } + /** + * Get the size of buckets to stop early + */ protected int getSize() { return Integer.MAX_VALUE; } @Override - final FilterRewriteOptimizationContext.DebugInfo tryOptimize( + final FilterRewriteOptimizationContext.OptimizeResult tryOptimize( PointValues values, BiConsumer incrementDocCount, - Ranges ranges + Ranges ranges, + FilterRewriteOptimizationContext.SubAggCollectorParam subAggCollectorParam ) throws IOException { int size = getSize(); DateFieldMapper.DateFieldType fieldType = getFieldType(); - BiConsumer incrementFunc = (activeIndex, docCount) -> { + + Function getBucketOrd = (activeIndex) -> { long rangeStart = LongPoint.decodeDimension(ranges.lowers[activeIndex], 0); rangeStart = fieldType.convertNanosToMillis(rangeStart); - long bucketOrd = getBucketOrd(bucketOrdProducer().apply(rangeStart)); - incrementDocCount.accept(bucketOrd, (long) docCount); + return getBucketOrd(bucketOrdProducer().apply(rangeStart)); }; - return multiRangesTraverse(values.getPointTree(), ranges, incrementFunc, size); + return getResult(values, 
incrementDocCount, ranges, getBucketOrd, size, subAggCollectorParam); } private static long getBucketOrd(long bucketOrd) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/FilterRewriteOptimizationContext.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/FilterRewriteOptimizationContext.java index 87faafe4526de..639f3477b7868 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/FilterRewriteOptimizationContext.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/FilterRewriteOptimizationContext.java @@ -14,7 +14,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.PointValues; +import org.apache.lucene.util.DocIdSetBuilder; import org.opensearch.index.mapper.DocCountFieldMapper; +import org.opensearch.search.aggregations.BucketCollector; import org.opensearch.search.internal.SearchContext; import java.io.IOException; @@ -42,12 +44,16 @@ public final class FilterRewriteOptimizationContext { private Ranges ranges; // built at shard level + private boolean hasSubAgg; + // debug info related fields private final AtomicInteger leafNodeVisited = new AtomicInteger(); private final AtomicInteger innerNodeVisited = new AtomicInteger(); private final AtomicInteger segments = new AtomicInteger(); private final AtomicInteger optimizedSegments = new AtomicInteger(); + private int segmentThreshold = 0; + public FilterRewriteOptimizationContext( AggregatorBridge aggregatorBridge, final Object parent, @@ -65,7 +71,8 @@ public FilterRewriteOptimizationContext( private boolean canOptimize(final Object parent, final int subAggLength, SearchContext context) throws IOException { if (context.maxAggRewriteFilters() == 0) return false; - if (parent != null || subAggLength != 0) return false; + if (parent != null) return false; + this.hasSubAgg = subAggLength > 0; boolean canOptimize = aggregatorBridge.canOptimize(); if (canOptimize) { @@ -81,6 +88,7 @@ private boolean canOptimize(final Object parent, final int subAggLength, SearchC } logger.debug("Fast filter rewriteable: {} for shard {}", canOptimize, shardId); + segmentThreshold = context.filterRewriteSegmentThreshold(); return canOptimize; } @@ -94,10 +102,14 @@ void setRanges(Ranges ranges) { * Usage: invoked at segment level — in getLeafCollector of aggregator * * @param incrementDocCount consume the doc_count results for certain ordinal - * @param segmentMatchAll if your optimization can prepareFromSegment, you should pass in this flag to decide whether to prepareFromSegment + * @param segmentMatchAll we can always tryOptimize for match all scenario */ - public boolean tryOptimize(final LeafReaderContext leafCtx, final BiConsumer incrementDocCount, boolean segmentMatchAll) - throws IOException { + public boolean tryOptimize( + final LeafReaderContext leafCtx, + final BiConsumer incrementDocCount, + boolean segmentMatchAll, + BucketCollector collectableSubAggregators + ) throws IOException { segments.incrementAndGet(); if (!canOptimize) { return false; @@ -123,7 +135,25 @@ public boolean tryOptimize(final LeafReaderContext leafCtx, final BiConsumer leafCtx.reader().maxDoc() / ranges.getSize()) { + // comparing with a rough estimate of docs per range in this segment + return false; + } + + OptimizeResult optimizeResult; + SubAggCollectorParam subAggCollectorParam; + if (hasSubAgg) { + subAggCollectorParam = new 
SubAggCollectorParam(collectableSubAggregators, leafCtx); + } else { + subAggCollectorParam = null; + } + try { + optimizeResult = aggregatorBridge.tryOptimize(values, incrementDocCount, ranges, subAggCollectorParam); + consumeDebugInfo(optimizeResult); + } catch (AbortFilterRewriteOptimizationException e) { + logger.error("Abort filter rewrite optimization, fall back to default path"); + return false; + } optimizedSegments.incrementAndGet(); logger.debug("Fast filter optimization applied to shard {} segment {}", shardId, leafCtx.ord); @@ -132,6 +162,18 @@ public boolean tryOptimize(final LeafReaderContext leafCtx, final BiConsumerThe main entry point is the {@link #multiRangesTraverse(PointValues.PointTree, Ranges, - * BiConsumer, int)} method + *

The main entry point is the {@link #multiRangesTraverse} method * - *

The class uses a {@link RangeCollectorForPointTree} to keep track of the active ranges and + *

The class uses a {@link RangeCollector} to keep track of the active ranges and * determine which parts of the tree to visit. The {@link * PointValues.IntersectVisitor} implementation is responsible for the actual visitation and * document count collection. @@ -36,87 +39,111 @@ private PointTreeTraversal() {} private static final Logger logger = LogManager.getLogger(Helper.loggerName); + /** + * Creates an appropriate RangeCollector based on whether sub-aggregations are needed. + */ + static RangeCollector createCollector( + Ranges ranges, + BiConsumer incrementRangeDocCount, + int maxNumNonZeroRange, + int activeIndex, + Function getBucketOrd, + FilterRewriteOptimizationContext.OptimizeResult result, + FilterRewriteOptimizationContext.SubAggCollectorParam subAggCollectorParam + ) { + if (subAggCollectorParam == null) { + return new SimpleRangeCollector(ranges, incrementRangeDocCount, maxNumNonZeroRange, activeIndex, result); + } else { + return new SubAggRangeCollector( + ranges, + incrementRangeDocCount, + maxNumNonZeroRange, + activeIndex, + result, + getBucketOrd, + subAggCollectorParam + ); + } + } + /** * Traverses the given {@link PointValues.PointTree} and collects document counts for the intersecting ranges. * - * @param tree the point tree to traverse - * @param ranges the set of ranges to intersect with - * @param incrementDocCount a callback to increment the document count for a range bucket - * @param maxNumNonZeroRanges the maximum number of non-zero ranges to collect - * @return a {@link FilterRewriteOptimizationContext.DebugInfo} object containing debug information about the traversal + * @param tree the point tree to traverse + * @param collector the collector to use for gathering results + * @return a {@link FilterRewriteOptimizationContext.OptimizeResult} object containing debug information about the traversal */ - static FilterRewriteOptimizationContext.DebugInfo multiRangesTraverse( - final PointValues.PointTree tree, - final Ranges ranges, - final BiConsumer incrementDocCount, - final int maxNumNonZeroRanges - ) throws IOException { - FilterRewriteOptimizationContext.DebugInfo debugInfo = new FilterRewriteOptimizationContext.DebugInfo(); - int activeIndex = ranges.firstRangeIndex(tree.getMinPackedValue(), tree.getMaxPackedValue()); - if (activeIndex < 0) { - logger.debug("No ranges match the query, skip the fast filter optimization"); - return debugInfo; - } - RangeCollectorForPointTree collector = new RangeCollectorForPointTree(incrementDocCount, maxNumNonZeroRanges, ranges, activeIndex); + static FilterRewriteOptimizationContext.OptimizeResult multiRangesTraverse(final PointValues.PointTree tree, RangeCollector collector) + throws IOException { PointValues.IntersectVisitor visitor = getIntersectVisitor(collector); try { - intersectWithRanges(visitor, tree, collector, debugInfo); + intersectWithRanges(visitor, tree, collector); } catch (CollectionTerminatedException e) { logger.debug("Early terminate since no more range to collect"); } collector.finalizePreviousRange(); - - return debugInfo; + return collector.getResult(); } - private static void intersectWithRanges( - PointValues.IntersectVisitor visitor, - PointValues.PointTree pointTree, - RangeCollectorForPointTree collector, - FilterRewriteOptimizationContext.DebugInfo debug - ) throws IOException { + private static void intersectWithRanges(PointValues.IntersectVisitor visitor, PointValues.PointTree pointTree, RangeCollector collector) + throws IOException { PointValues.Relation r = 
visitor.compare(pointTree.getMinPackedValue(), pointTree.getMaxPackedValue()); switch (r) { case CELL_INSIDE_QUERY: collector.countNode((int) pointTree.size()); - debug.visitInner(); + if (collector.hasSubAgg()) { + pointTree.visitDocIDs(visitor); + } else { + collector.visitInner(); + } break; case CELL_CROSSES_QUERY: if (pointTree.moveToChild()) { do { - intersectWithRanges(visitor, pointTree, collector, debug); + intersectWithRanges(visitor, pointTree, collector); } while (pointTree.moveToSibling()); pointTree.moveToParent(); } else { pointTree.visitDocValues(visitor); - debug.visitLeaf(); + collector.visitLeaf(); } break; case CELL_OUTSIDE_QUERY: } } - private static PointValues.IntersectVisitor getIntersectVisitor(RangeCollectorForPointTree collector) { + private static PointValues.IntersectVisitor getIntersectVisitor(RangeCollector collector) { return new PointValues.IntersectVisitor() { @Override public void visit(int docID) { - // this branch should be unreachable - throw new UnsupportedOperationException( - "This IntersectVisitor does not perform any actions on a " + "docID=" + docID + " node being visited" - ); + collector.collectDocId(docID); + } + + @Override + public void visit(DocIdSetIterator iterator) throws IOException { + collector.collectDocIdSet(iterator); } @Override public void visit(int docID, byte[] packedValue) throws IOException { - visitPoints(packedValue, collector::count); + visitPoints(packedValue, () -> { + collector.count(); + if (collector.hasSubAgg()) { + collector.collectDocId(docID); + } + }); } @Override public void visit(DocIdSetIterator iterator, byte[] packedValue) throws IOException { visitPoints(packedValue, () -> { + // note: iterator can only iterate once for (int doc = iterator.nextDoc(); doc != NO_MORE_DOCS; doc = iterator.nextDoc()) { collector.count(); + if (collector.hasSubAgg()) { + collector.collectDocId(doc); + } } }); } @@ -124,7 +151,7 @@ public void visit(DocIdSetIterator iterator, byte[] packedValue) throws IOExcept private void visitPoints(byte[] packedValue, CheckedRunnable collect) throws IOException { if (!collector.withinUpperBound(packedValue)) { collector.finalizePreviousRange(); - if (collector.iterateRangeEnd(packedValue)) { + if (collector.iterateRangeEnd(packedValue, true)) { throw new CollectionTerminatedException(); } } @@ -139,7 +166,7 @@ public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue // try to find the first range that may collect values from this cell if (!collector.withinUpperBound(minPackedValue)) { collector.finalizePreviousRange(); - if (collector.iterateRangeEnd(minPackedValue)) { + if (collector.iterateRangeEnd(minPackedValue, false)) { throw new CollectionTerminatedException(); } } @@ -155,69 +182,4 @@ public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue } }; } - - private static class RangeCollectorForPointTree { - private final BiConsumer incrementRangeDocCount; - private int counter = 0; - - private final Ranges ranges; - private int activeIndex; - - private int visitedRange = 0; - private final int maxNumNonZeroRange; - - public RangeCollectorForPointTree( - BiConsumer incrementRangeDocCount, - int maxNumNonZeroRange, - Ranges ranges, - int activeIndex - ) { - this.incrementRangeDocCount = incrementRangeDocCount; - this.maxNumNonZeroRange = maxNumNonZeroRange; - this.ranges = ranges; - this.activeIndex = activeIndex; - } - - private void count() { - counter++; - } - - private void countNode(int count) { - counter += count; - } - - private void 
finalizePreviousRange() { - if (counter > 0) { - incrementRangeDocCount.accept(activeIndex, counter); - counter = 0; - } - } - - /** - * @return true when iterator exhausted or collect enough non-zero ranges - */ - private boolean iterateRangeEnd(byte[] value) { - // the new value may not be contiguous to the previous one - // so try to find the first next range that cross the new value - while (!withinUpperBound(value)) { - if (++activeIndex >= ranges.size) { - return true; - } - } - visitedRange++; - return visitedRange > maxNumNonZeroRange; - } - - private boolean withinLowerBound(byte[] value) { - return Ranges.withinLowerBound(value, ranges.lowers[activeIndex]); - } - - private boolean withinUpperBound(byte[] value) { - return Ranges.withinUpperBound(value, ranges.uppers[activeIndex]); - } - - private boolean withinRange(byte[] value) { - return withinLowerBound(value) && withinUpperBound(value); - } - } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/RangeAggregatorBridge.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/RangeAggregatorBridge.java index fc1bcd83f2c1b..0282a7a27297d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/RangeAggregatorBridge.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/RangeAggregatorBridge.java @@ -20,8 +20,6 @@ import java.util.function.BiConsumer; import java.util.function.Function; -import static org.opensearch.search.aggregations.bucket.filterrewrite.PointTreeTraversal.multiRangesTraverse; - /** * For range aggregation */ @@ -74,18 +72,17 @@ final Ranges tryBuildRangesFromSegment(LeafReaderContext leaf) { } @Override - final FilterRewriteOptimizationContext.DebugInfo tryOptimize( + final FilterRewriteOptimizationContext.OptimizeResult tryOptimize( PointValues values, BiConsumer incrementDocCount, - Ranges ranges + Ranges ranges, + FilterRewriteOptimizationContext.SubAggCollectorParam subAggCollectorParam ) throws IOException { int size = Integer.MAX_VALUE; - BiConsumer incrementFunc = (activeIndex, docCount) -> { - long bucketOrd = bucketOrdProducer().apply(activeIndex); - incrementDocCount.accept(bucketOrd, (long) docCount); - }; - return multiRangesTraverse(values.getPointTree(), ranges, incrementFunc, size); + Function getBucketOrd = (activeIndex) -> bucketOrdProducer().apply(activeIndex); + + return getResult(values, incrementDocCount, ranges, getBucketOrd, size, subAggCollectorParam); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Ranges.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Ranges.java index 2819778ce215b..44d94ab4a6658 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Ranges.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Ranges.java @@ -13,7 +13,7 @@ /** * Internal ranges representation for the filter rewrite optimization */ -final class Ranges { +public final class Ranges { byte[][] lowers; // inclusive byte[][] uppers; // exclusive int size; @@ -29,6 +29,18 @@ final class Ranges { comparator = ArrayUtil.getUnsignedComparator(byteLen); } + public int getSize() { + return size; + } + + public byte[][] getLowers() { + return lowers; + } + + public byte[][] getUppers() { + return uppers; + } + public int firstRangeIndex(byte[] globalMin, byte[] globalMax) { if (compareByteValue(lowers[0], globalMax) > 0) { return 
-1; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/AbstractRangeCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/AbstractRangeCollector.java new file mode 100644 index 0000000000000..c5f0824e2967c --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/AbstractRangeCollector.java @@ -0,0 +1,82 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.search.aggregations.bucket.filterrewrite.rangecollector; + +import org.opensearch.search.aggregations.bucket.filterrewrite.FilterRewriteOptimizationContext; +import org.opensearch.search.aggregations.bucket.filterrewrite.Ranges; + +import java.util.function.BiConsumer; + +/** + * Abstract class for range collectors. + */ +public abstract class AbstractRangeCollector implements RangeCollector { + protected final Ranges ranges; + protected int activeIndex; + protected final BiConsumer incrementRangeDocCount; + protected final int maxNumNonZeroRange; + protected final FilterRewriteOptimizationContext.OptimizeResult result; + private int visitedRange = 0; + protected int counter = 0; + + public AbstractRangeCollector( + Ranges ranges, + BiConsumer incrementRangeDocCount, + int maxNumNonZeroRange, + int activeIndex, + FilterRewriteOptimizationContext.OptimizeResult result + ) { + this.ranges = ranges; + this.activeIndex = activeIndex; + this.incrementRangeDocCount = incrementRangeDocCount; + this.maxNumNonZeroRange = maxNumNonZeroRange; + this.result = result; + } + + @Override + public boolean iterateRangeEnd(byte[] value, boolean inLeaf) { + while (!withinUpperBound(value)) { + if (++activeIndex >= ranges.getSize()) { + return true; + } + } + visitedRange++; + return visitedRange > maxNumNonZeroRange; + } + + @Override + public boolean withinLowerBound(byte[] value) { + return Ranges.withinLowerBound(value, ranges.getLowers()[activeIndex]); + } + + @Override + public boolean withinUpperBound(byte[] value) { + return Ranges.withinUpperBound(value, ranges.getUppers()[activeIndex]); + } + + @Override + public boolean withinRange(byte[] value) { + return withinLowerBound(value) && withinUpperBound(value); + } + + @Override + public void visitInner() { + result.visitInner(); + } + + @Override + public void visitLeaf() { + result.visitLeaf(); + } + + @Override + public FilterRewriteOptimizationContext.OptimizeResult getResult() { + return result; + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/RangeCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/RangeCollector.java new file mode 100644 index 0000000000000..9a0876e0a89e7 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/RangeCollector.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.bucket.filterrewrite.rangecollector; + +import org.apache.lucene.search.DocIdSetIterator; +import org.opensearch.search.aggregations.bucket.filterrewrite.FilterRewriteOptimizationContext; + +import java.io.IOException; + +/** + * Interface for collecting documents that fall within specified ranges during point tree traversal. + */ +public interface RangeCollector { + /** + * Whether the collector supports sub aggregation. + */ + boolean hasSubAgg(); + + /** + * Count a node that is fully contained within the current range. + * @param count The number of documents in the node + */ + void countNode(int count); + + /** + * Count a single document. + */ + void count(); + + /** + * Collect a single document ID. + * @param docId The document ID to collect + */ + void collectDocId(int docId); + + /** + * Collect a set of document IDs. + * @param iter Iterator over document IDs + */ + void collectDocIdSet(DocIdSetIterator iter) throws IOException; + + /** + * Finalize the current range and prepare for the next one. + */ + void finalizePreviousRange(); + + /** + * Iterate to find the next range that could include the given value. + * + * @param value The value to check against ranges + * @param inLeaf Whether this is called when processing a leaf node + * @return true if iteration is complete or enough non-zero ranges found + */ + boolean iterateRangeEnd(byte[] value, boolean inLeaf); + + /** + * Check if a value is within the lower bound of current range. + */ + boolean withinLowerBound(byte[] value); + + /** + * Check if a value is within the upper bound of current range. + */ + boolean withinUpperBound(byte[] value); + + /** + * Check if a value is within both bounds of current range. + */ + boolean withinRange(byte[] value); + + /** + * Hook point when visit inner node + */ + void visitInner(); + + /** + * Hook point when visit leaf node + */ + void visitLeaf(); + + FilterRewriteOptimizationContext.OptimizeResult getResult(); +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/SimpleRangeCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/SimpleRangeCollector.java new file mode 100644 index 0000000000000..df6d230a5a391 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/SimpleRangeCollector.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket.filterrewrite.rangecollector; + +import org.apache.lucene.search.DocIdSetIterator; +import org.opensearch.search.aggregations.bucket.filterrewrite.FilterRewriteOptimizationContext; +import org.opensearch.search.aggregations.bucket.filterrewrite.Ranges; + +import java.io.IOException; +import java.util.function.BiConsumer; + +/** + * Simple range collector implementation that only counts documents without collecting doc IDs. 
+ */ +public class SimpleRangeCollector extends AbstractRangeCollector { + + public SimpleRangeCollector( + Ranges ranges, + BiConsumer incrementRangeDocCount, + int maxNumNonZeroRange, + int activeIndex, + FilterRewriteOptimizationContext.OptimizeResult result + ) { + super(ranges, incrementRangeDocCount, maxNumNonZeroRange, activeIndex, result); + } + + @Override + public boolean hasSubAgg() { + return false; + } + + @Override + public void countNode(int count) { + counter += count; + } + + @Override + public void count() { + counter++; + } + + @Override + public void collectDocId(int docId) { + throw new UnsupportedOperationException("collectDocId should be unreachable"); + } + + @Override + public void collectDocIdSet(DocIdSetIterator iter) throws IOException { + throw new UnsupportedOperationException("collectDocIdSet should be unreachable"); + } + + @Override + public void finalizePreviousRange() { + if (counter > 0) { + incrementRangeDocCount.accept(activeIndex, counter); + counter = 0; + } + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/SubAggRangeCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/SubAggRangeCollector.java new file mode 100644 index 0000000000000..5c1f21b22e646 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/SubAggRangeCollector.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket.filterrewrite.rangecollector; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.util.BitDocIdSet; +import org.apache.lucene.util.FixedBitSet; +import org.opensearch.search.aggregations.BucketCollector; +import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.bucket.filterrewrite.FilterRewriteOptimizationContext; +import org.opensearch.search.aggregations.bucket.filterrewrite.Ranges; + +import java.io.IOException; +import java.util.function.BiConsumer; +import java.util.function.Function; + +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + +/** + * Range collector implementation that supports sub-aggregations by collecting doc IDs. 
+ */ +public class SubAggRangeCollector extends SimpleRangeCollector { + + private static final Logger logger = LogManager.getLogger(SubAggRangeCollector.class); + + private final Function getBucketOrd; + + private final BucketCollector collectableSubAggregators; + private final LeafReaderContext leafCtx; + + private final FixedBitSet bitSet; + private final BitDocIdSet bitDocIdSet; + + public SubAggRangeCollector( + Ranges ranges, + BiConsumer incrementRangeDocCount, + int maxNumNonZeroRange, + int activeIndex, + FilterRewriteOptimizationContext.OptimizeResult result, + Function getBucketOrd, + FilterRewriteOptimizationContext.SubAggCollectorParam subAggCollectorParam + ) { + super(ranges, incrementRangeDocCount, maxNumNonZeroRange, activeIndex, result); + this.getBucketOrd = getBucketOrd; + this.collectableSubAggregators = subAggCollectorParam.collectableSubAggregators(); + this.leafCtx = subAggCollectorParam.leafCtx(); + int numDocs = leafCtx.reader().maxDoc(); + bitSet = new FixedBitSet(numDocs); + bitDocIdSet = new BitDocIdSet(bitSet); + } + + @Override + public boolean hasSubAgg() { + return true; + } + + @Override + public void collectDocId(int docId) { + bitSet.set(docId); + } + + @Override + public void collectDocIdSet(DocIdSetIterator iter) throws IOException { + bitSet.or(iter); + } + + @Override + public void finalizePreviousRange() { + super.finalizePreviousRange(); + + long bucketOrd = getBucketOrd.apply(activeIndex); + logger.trace("finalize range {} with bucket ordinal {}", activeIndex, bucketOrd); + + // trigger the sub agg collection for this range + try { + DocIdSetIterator iterator = bitDocIdSet.iterator(); + // build a new leaf collector for each bucket + LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(leafCtx); + while (iterator.nextDoc() != NO_MORE_DOCS) { + int currentDoc = iterator.docID(); + sub.collect(currentDoc, bucketOrd); + } + logger.trace("collected sub aggregation for bucket {}", bucketOrd); + } catch (IOException e) { + throw new RuntimeException(e); + } + + bitSet.clear(); + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/package-info.java new file mode 100644 index 0000000000000..1cb630653b0ee --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/rangecollector/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * This package contains range collector interface and implementations for PointTreeTraversal + */ +package org.opensearch.search.aggregations.bucket.filterrewrite.rangecollector; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index cbeb27e8a3e63..3c9fc2dcb0e43 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -33,7 +33,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.Rounding; @@ -216,7 +215,7 @@ protected Prepared getRoundingPrepared() { @Override protected Function bucketOrdProducer() { - return (key) -> getBucketOrds().add(0, preparedRounding.round((long) key)); + return (key) -> getBucketOrds().add(0, preparedRounding.round(key)); } }; filterRewriteOptimizationContext = new FilterRewriteOptimizationContext(bridge, parent, subAggregators.length, context); @@ -245,15 +244,22 @@ public final DeferringBucketCollector getDeferringCollector() { protected abstract LeafBucketCollector getLeafCollector(SortedNumericDocValues values, LeafBucketCollector sub) throws IOException; + @Override + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { + return filterRewriteOptimizationContext.tryOptimize( + ctx, + this::incrementBucketDocCount, + segmentMatchAll(context, ctx), + collectableSubAggregators + ); + } + @Override public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } - boolean optimized = filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, segmentMatchAll(context, ctx)); - if (optimized) throw new CollectionTerminatedException(); - final SortedNumericDocValues values = valuesSource.longValues(ctx); final LeafBucketCollector iteratingCollector = getLeafCollector(values, sub); return new LeafBucketCollectorBase(sub, values) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 3d935100fd0d1..c3cd4b40cf0d6 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -194,7 +194,13 @@ protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws StarTreeQueryHelper.preComputeBucketsWithStarTree(starTreeBucketCollector); return true; } - return filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, segmentMatchAll(context, ctx)); + + return filterRewriteOptimizationContext.tryOptimize( + ctx, + this::incrementBucketDocCount, + segmentMatchAll(context, ctx), + collectableSubAggregators + ); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java index 28f47298935df..02186a6a99079 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java @@ -328,20 +328,18 @@ public ScoreMode scoreMode() { @Override protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { - if (segmentMatchAll(context, ctx) && filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, false)) { - return true; - } CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { preComputeWithStarTree(ctx, supportedStarTree); return true; } - return false; + + return segmentMatchAll(context, ctx) + && filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, true, collectableSubAggregators); } @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { - final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); return new LeafBucketCollectorBase(sub, values) { @Override diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index b539981da4ebd..58e2525cb28c7 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -38,6 +38,7 @@ import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchType; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; @@ -522,6 +523,11 @@ public int maxAggRewriteFilters() { return 0; } + @ExperimentalApi + public int filterRewriteSegmentThreshold() { + return 0; + } + public int cardinalityAggregationPruningThreshold() { return 0; } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/filterrewrite/FilterRewriteSubAggTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/filterrewrite/FilterRewriteSubAggTests.java new file mode 100644 index 0000000000000..1187eeac34d84 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/filterrewrite/FilterRewriteSubAggTests.java @@ -0,0 +1,452 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.bucket.filterrewrite; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.LongField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.util.TestUtil; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.mapper.ParseContext; +import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.AggregationBuilders; +import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.InternalAggregation; +import org.opensearch.search.aggregations.MultiBucketConsumerService; +import org.opensearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.histogram.Histogram; +import org.opensearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram; +import org.opensearch.search.aggregations.bucket.histogram.InternalDateHistogram; +import org.opensearch.search.aggregations.bucket.range.InternalRange; +import org.opensearch.search.aggregations.bucket.range.RangeAggregationBuilder; +import org.opensearch.search.aggregations.metrics.InternalStats; +import org.opensearch.search.aggregations.pipeline.PipelineAggregator; +import org.opensearch.search.internal.SearchContext; + +import java.io.IOException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; + +public class FilterRewriteSubAggTests extends AggregatorTestCase { + private final String longFieldName = "metric"; + private final String dateFieldName = "timestamp"; + private final Query matchAllQuery = new MatchAllDocsQuery(); + private final NumberFieldMapper.NumberFieldType longFieldType = new NumberFieldMapper.NumberFieldType( + longFieldName, + NumberFieldMapper.NumberType.LONG + ); + private final DateFieldMapper.DateFieldType dateFieldType = aggregableDateFieldType(false, true); + private final NumberFieldMapper.NumberType numberType = longFieldType.numberType(); + private final String rangeAggName = "range"; + private final String autoDateAggName = "auto"; + private final String dateAggName = "date"; + private final String statsAggName = "stats"; + private final List DEFAULT_DATA = List.of( + new TestDoc(0, Instant.parse("2020-03-01T00:00:00Z")), + new TestDoc(1, Instant.parse("2020-03-01T00:00:00Z")), + new TestDoc(1, Instant.parse("2020-03-01T00:00:01Z")), + new TestDoc(2, Instant.parse("2020-03-01T01:00:00Z")), + new TestDoc(3, Instant.parse("2020-03-01T02:00:00Z")), + 
new TestDoc(4, Instant.parse("2020-03-01T03:00:00Z")), + new TestDoc(5, Instant.parse("2020-03-01T04:00:00Z")), + new TestDoc(6, Instant.parse("2020-03-01T04:00:00Z")) + ); + + public void testRange() throws IOException { + RangeAggregationBuilder rangeAggregationBuilder = new RangeAggregationBuilder(rangeAggName).field(longFieldName) + .addRange(1, 2) + .addRange(2, 4) + .addRange(4, 6) + .subAggregation(new AutoDateHistogramAggregationBuilder(autoDateAggName).field(dateFieldName).setNumBuckets(3)); + + InternalRange result = executeAggregation(DEFAULT_DATA, rangeAggregationBuilder, true); + + // Verify results + List buckets = result.getBuckets(); + assertEquals(3, buckets.size()); + + InternalRange.Bucket firstBucket = buckets.get(0); + assertEquals(2, firstBucket.getDocCount()); + InternalAutoDateHistogram firstAuto = firstBucket.getAggregations().get(autoDateAggName); + assertEquals(2, firstAuto.getBuckets().size()); + + InternalRange.Bucket secondBucket = buckets.get(1); + assertEquals(2, secondBucket.getDocCount()); + InternalAutoDateHistogram secondAuto = secondBucket.getAggregations().get(autoDateAggName); + assertEquals(3, secondAuto.getBuckets().size()); + + InternalRange.Bucket thirdBucket = buckets.get(2); + assertEquals(2, thirdBucket.getDocCount()); + InternalAutoDateHistogram thirdAuto = thirdBucket.getAggregations().get(autoDateAggName); + assertEquals(3, thirdAuto.getBuckets().size()); + } + + public void testDateHisto() throws IOException { + DateHistogramAggregationBuilder dateHistogramAggregationBuilder = new DateHistogramAggregationBuilder(dateAggName).field( + dateFieldName + ).calendarInterval(DateHistogramInterval.HOUR).subAggregation(AggregationBuilders.stats(statsAggName).field(longFieldName)); + + InternalDateHistogram result = executeAggregation(DEFAULT_DATA, dateHistogramAggregationBuilder, true); + + // Verify results + List buckets = result.getBuckets(); + assertEquals(5, buckets.size()); + + InternalDateHistogram.Bucket firstBucket = buckets.get(0); + assertEquals("2020-03-01T00:00:00.000Z", firstBucket.getKeyAsString()); + assertEquals(3, firstBucket.getDocCount()); + InternalStats firstStats = firstBucket.getAggregations().get(statsAggName); + assertEquals(3, firstStats.getCount()); + assertEquals(1, firstStats.getMax(), 0); + assertEquals(0, firstStats.getMin(), 0); + + InternalDateHistogram.Bucket secondBucket = buckets.get(1); + assertEquals("2020-03-01T01:00:00.000Z", secondBucket.getKeyAsString()); + assertEquals(1, secondBucket.getDocCount()); + InternalStats secondStats = secondBucket.getAggregations().get(statsAggName); + assertEquals(1, secondStats.getCount()); + assertEquals(2, secondStats.getMax(), 0); + assertEquals(2, secondStats.getMin(), 0); + + InternalDateHistogram.Bucket thirdBucket = buckets.get(2); + assertEquals("2020-03-01T02:00:00.000Z", thirdBucket.getKeyAsString()); + assertEquals(1, thirdBucket.getDocCount()); + InternalStats thirdStats = thirdBucket.getAggregations().get(statsAggName); + assertEquals(1, thirdStats.getCount()); + assertEquals(3, thirdStats.getMax(), 0); + assertEquals(3, thirdStats.getMin(), 0); + + InternalDateHistogram.Bucket fourthBucket = buckets.get(3); + assertEquals("2020-03-01T03:00:00.000Z", fourthBucket.getKeyAsString()); + assertEquals(1, fourthBucket.getDocCount()); + InternalStats fourthStats = fourthBucket.getAggregations().get(statsAggName); + assertEquals(1, fourthStats.getCount()); + assertEquals(4, fourthStats.getMax(), 0); + assertEquals(4, fourthStats.getMin(), 0); + + 
InternalDateHistogram.Bucket fifthBucket = buckets.get(4); + assertEquals("2020-03-01T04:00:00.000Z", fifthBucket.getKeyAsString()); + assertEquals(2, fifthBucket.getDocCount()); + InternalStats fifthStats = fifthBucket.getAggregations().get(statsAggName); + assertEquals(2, fifthStats.getCount()); + assertEquals(6, fifthStats.getMax(), 0); + assertEquals(5, fifthStats.getMin(), 0); + } + + public void testAutoDateHisto() throws IOException { + AutoDateHistogramAggregationBuilder autoDateHistogramAggregationBuilder = new AutoDateHistogramAggregationBuilder(dateAggName) + .field(dateFieldName) + .setNumBuckets(5) + .subAggregation(AggregationBuilders.stats(statsAggName).field(longFieldName)); + + InternalAutoDateHistogram result = executeAggregation(DEFAULT_DATA, autoDateHistogramAggregationBuilder, true); + + // Verify results + List buckets = result.getBuckets(); + assertEquals(5, buckets.size()); + + Histogram.Bucket firstBucket = buckets.get(0); + assertEquals("2020-03-01T00:00:00.000Z", firstBucket.getKeyAsString()); + assertEquals(3, firstBucket.getDocCount()); + InternalStats firstStats = firstBucket.getAggregations().get(statsAggName); + assertEquals(3, firstStats.getCount()); + assertEquals(1, firstStats.getMax(), 0); + assertEquals(0, firstStats.getMin(), 0); + + Histogram.Bucket secondBucket = buckets.get(1); + assertEquals("2020-03-01T01:00:00.000Z", secondBucket.getKeyAsString()); + assertEquals(1, secondBucket.getDocCount()); + InternalStats secondStats = secondBucket.getAggregations().get(statsAggName); + assertEquals(1, secondStats.getCount()); + assertEquals(2, secondStats.getMax(), 0); + assertEquals(2, secondStats.getMin(), 0); + + Histogram.Bucket thirdBucket = buckets.get(2); + assertEquals("2020-03-01T02:00:00.000Z", thirdBucket.getKeyAsString()); + assertEquals(1, thirdBucket.getDocCount()); + InternalStats thirdStats = thirdBucket.getAggregations().get(statsAggName); + assertEquals(1, thirdStats.getCount()); + assertEquals(3, thirdStats.getMax(), 0); + assertEquals(3, thirdStats.getMin(), 0); + + Histogram.Bucket fourthBucket = buckets.get(3); + assertEquals("2020-03-01T03:00:00.000Z", fourthBucket.getKeyAsString()); + assertEquals(1, fourthBucket.getDocCount()); + InternalStats fourthStats = fourthBucket.getAggregations().get(statsAggName); + assertEquals(1, fourthStats.getCount()); + assertEquals(4, fourthStats.getMax(), 0); + assertEquals(4, fourthStats.getMin(), 0); + + Histogram.Bucket fifthBucket = buckets.get(4); + assertEquals("2020-03-01T04:00:00.000Z", fifthBucket.getKeyAsString()); + assertEquals(2, fifthBucket.getDocCount()); + InternalStats fifthStats = fifthBucket.getAggregations().get(statsAggName); + assertEquals(2, fifthStats.getCount()); + assertEquals(6, fifthStats.getMax(), 0); + assertEquals(5, fifthStats.getMin(), 0); + + } + + public void testRandom() throws IOException { + Map dataset = new HashMap<>(); + dataset.put("2017-02-01T09:02:00.000Z", randomIntBetween(100, 2000)); + dataset.put("2017-02-01T09:59:59.999Z", randomIntBetween(100, 2000)); + dataset.put("2017-02-01T10:00:00.001Z", randomIntBetween(100, 2000)); + dataset.put("2017-02-01T13:06:00.000Z", randomIntBetween(100, 2000)); + dataset.put("2017-02-01T14:04:00.000Z", randomIntBetween(100, 2000)); + dataset.put("2017-02-01T14:05:00.000Z", randomIntBetween(100, 2000)); + dataset.put("2017-02-01T15:59:00.000Z", randomIntBetween(100, 2000)); + dataset.put("2017-02-01T16:06:00.000Z", randomIntBetween(100, 2000)); + dataset.put("2017-02-01T16:48:00.000Z", randomIntBetween(100, 2000)); + 
dataset.put("2017-02-01T16:59:00.000Z", randomIntBetween(100, 2000)); + + Map subAggToVerify = new HashMap<>(); + List docs = new ArrayList<>(); + for (Map.Entry entry : dataset.entrySet()) { + String date = entry.getKey(); + int docCount = entry.getValue(); + // loop value times and generate TestDoc + if (!subAggToVerify.containsKey(date)) { + subAggToVerify.put(date, new SubAggToVerify()); + } + SubAggToVerify subAgg = subAggToVerify.get(date); + subAgg.count = docCount; + for (int i = 0; i < docCount; i++) { + Instant instant = Instant.parse(date); + int docValue = randomIntBetween(0, 10_000); + subAgg.min = Math.min(subAgg.min, docValue); + subAgg.max = Math.max(subAgg.max, docValue); + docs.add(new TestDoc(docValue, instant)); + } + } + + DateHistogramAggregationBuilder dateHistogramAggregationBuilder = new DateHistogramAggregationBuilder(dateAggName).field( + dateFieldName + ) + .calendarInterval(DateHistogramInterval.HOUR) + .minDocCount(1L) + .subAggregation(AggregationBuilders.stats(statsAggName).field(longFieldName)); + + InternalDateHistogram result = executeAggregation(docs, dateHistogramAggregationBuilder, true); + List buckets = result.getBuckets(); + assertEquals(6, buckets.size()); + for (InternalDateHistogram.Bucket bucket : buckets) { + String date = bucket.getKeyAsString(); + SubAggToVerify subAgg = subAggToVerify.get(date); + if (subAgg == null) continue; + InternalStats stats = bucket.getAggregations().get(statsAggName); + assertEquals(subAgg.count, stats.getCount()); + assertEquals(subAgg.max, stats.getMax(), 0); + assertEquals(subAgg.min, stats.getMin(), 0); + } + } + + public void testLeafTraversal() throws IOException { + Map dataset = new HashMap<>(); + dataset.put("2017-02-01T09:02:00.000Z", 512); + dataset.put("2017-02-01T09:59:59.999Z", 256); + dataset.put("2017-02-01T10:00:00.001Z", 256); + dataset.put("2017-02-01T13:06:00.000Z", 512); + dataset.put("2017-02-01T14:04:00.000Z", 256); + dataset.put("2017-02-01T14:05:00.000Z", 256); + dataset.put("2017-02-01T15:59:00.000Z", 768); + + Map subAggToVerify = new HashMap<>(); + List docs = new ArrayList<>(); + for (Map.Entry entry : dataset.entrySet()) { + String date = entry.getKey(); + int docCount = entry.getValue(); + // loop value times and generate TestDoc + if (!subAggToVerify.containsKey(date)) { + subAggToVerify.put(date, new SubAggToVerify()); + } + SubAggToVerify subAgg = subAggToVerify.get(date); + subAgg.count = docCount; + for (int i = 0; i < docCount; i++) { + Instant instant = Instant.parse(date); + int docValue = randomIntBetween(0, 10_000); + subAgg.min = Math.min(subAgg.min, docValue); + subAgg.max = Math.max(subAgg.max, docValue); + docs.add(new TestDoc(docValue, instant)); + } + } + + DateHistogramAggregationBuilder dateHistogramAggregationBuilder = new DateHistogramAggregationBuilder(dateAggName).field( + dateFieldName + ) + .calendarInterval(DateHistogramInterval.HOUR) + .minDocCount(1L) + .subAggregation(AggregationBuilders.stats(statsAggName).field(longFieldName)); + + InternalDateHistogram result = executeAggregation(docs, dateHistogramAggregationBuilder, false); + List buckets = result.getBuckets(); + assertEquals(5, buckets.size()); + for (InternalDateHistogram.Bucket bucket : buckets) { + String date = bucket.getKeyAsString(); + SubAggToVerify subAgg = subAggToVerify.get(date); + if (subAgg == null) continue; + InternalStats stats = bucket.getAggregations().get(statsAggName); + assertEquals(subAgg.count, stats.getCount()); + assertEquals(subAgg.max, stats.getMax(), 0); + 
assertEquals(subAgg.min, stats.getMin(), 0); + } + } + + private IA executeAggregation( + List docs, + AggregationBuilder aggregationBuilder, + boolean random + ) throws IOException { + try (Directory directory = setupIndex(docs, random)) { + try (DirectoryReader indexReader = DirectoryReader.open(directory)) { + return executeAggregationOnReader(indexReader, aggregationBuilder); + } + } + } + + private Directory setupIndex(List docs, boolean random) throws IOException { + Directory directory = newDirectory(); + if (!random) { + try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig().setCodec(TestUtil.getDefaultCodec()))) { + for (TestDoc doc : docs) { + indexWriter.addDocument(doc.toDocument()); + } + } + } else { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (TestDoc doc : docs) { + indexWriter.addDocument(doc.toDocument()); + } + } + } + return directory; + } + + private IA executeAggregationOnReader( + DirectoryReader indexReader, + AggregationBuilder aggregationBuilder + ) throws IOException { + IndexSearcher indexSearcher = new IndexSearcher(indexReader); + + MultiBucketConsumerService.MultiBucketConsumer bucketConsumer = createBucketConsumer(); + SearchContext searchContext = createSearchContext( + indexSearcher, + createIndexSettings(), + matchAllQuery, + bucketConsumer, + longFieldType, + dateFieldType + ); + Aggregator aggregator = createAggregator(aggregationBuilder, searchContext); + CountingAggregator countingAggregator = new CountingAggregator(new AtomicInteger(), aggregator); + + // Execute aggregation + countingAggregator.preCollection(); + indexSearcher.search(matchAllQuery, countingAggregator); + countingAggregator.postCollection(); + + // Reduce results + IA topLevel = (IA) countingAggregator.buildTopLevel(); + MultiBucketConsumerService.MultiBucketConsumer reduceBucketConsumer = createReduceBucketConsumer(); + InternalAggregation.ReduceContext context = createReduceContext(countingAggregator, reduceBucketConsumer); + + IA result = (IA) topLevel.reduce(Collections.singletonList(topLevel), context); + doAssertReducedMultiBucketConsumer(result, reduceBucketConsumer); + + assertEquals("Expect not using collect to do aggregation", 0, countingAggregator.getCollectCount().get()); + + return result; + } + + private MultiBucketConsumerService.MultiBucketConsumer createBucketConsumer() { + return new MultiBucketConsumerService.MultiBucketConsumer( + DEFAULT_MAX_BUCKETS, + new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST) + ); + } + + private MultiBucketConsumerService.MultiBucketConsumer createReduceBucketConsumer() { + return new MultiBucketConsumerService.MultiBucketConsumer( + Integer.MAX_VALUE, + new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST) + ); + } + + private InternalAggregation.ReduceContext createReduceContext( + Aggregator aggregator, + MultiBucketConsumerService.MultiBucketConsumer reduceBucketConsumer + ) { + return InternalAggregation.ReduceContext.forFinalReduction( + aggregator.context().bigArrays(), + getMockScriptService(), + reduceBucketConsumer, + PipelineAggregator.PipelineTree.EMPTY + ); + } + + private class TestDoc { + private final long metric; + private final Instant timestamp; + + public TestDoc(long metric, Instant timestamp) { + this.metric = metric; + this.timestamp = timestamp; + } + + public ParseContext.Document toDocument() { + ParseContext.Document doc = new ParseContext.Document(); + + List fieldList = 
numberType.createFields(longFieldName, metric, true, true, false); + for (Field fld : fieldList) + doc.add(fld); + doc.add(new LongField(dateFieldName, dateFieldType.parse(timestamp.toString()), Field.Store.NO)); + + return doc; + } + } + + private static class SubAggToVerify { + int min; + int max; + int count; + } + + protected final DateFieldMapper.DateFieldType aggregableDateFieldType(boolean useNanosecondResolution, boolean isSearchable) { + return new DateFieldMapper.DateFieldType( + "timestamp", + isSearchable, + false, + true, + DateFieldMapper.getDefaultDateTimeFormatter(), + useNanosecondResolution ? DateFieldMapper.Resolution.NANOSECONDS : DateFieldMapper.Resolution.MILLISECONDS, + null, + Collections.emptyMap() + ); + } +} From 5bbb6991edac8250f49e4612a2e2a4ee19b59cfa Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Sat, 12 Apr 2025 07:41:40 +0530 Subject: [PATCH 210/550] Fix flaky test AzureBlobStoreRepositoryTests (#17905) * Fix flaky test AzureBlobStoreRepositoryTests Signed-off-by: Sachin Kale * Fix spotless errors Signed-off-by: Sachin Kale --------- Signed-off-by: Sachin Kale --- .../repositories/azure/AzureBlobStoreRepositoryTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 1ba16422c9214..a8d9efe1aa72a 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -49,6 +49,7 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.AfterClass; import java.io.IOException; @@ -63,6 +64,7 @@ import reactor.core.scheduler.Schedulers; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an Azure endpoint") +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class AzureBlobStoreRepositoryTests extends OpenSearchMockAPIBasedRepositoryIntegTestCase { @AfterClass public static void shutdownSchedulers() { From 4308f4c1132c79cade19cc595722964e0569ecf5 Mon Sep 17 00:00:00 2001 From: Asim M Date: Fri, 11 Apr 2025 23:38:50 -0700 Subject: [PATCH 211/550] Disable scoring of Keyword Term search by default, allow user use old logic using useSimilarity parameter (#17889) * Initial attempt to add constant scorer for term keyword search * will follow up with unit tests * making sure I'm on the right track Signed-off-by: Asim Mahmood * Fix unit tests * will follow up with manually testing the new mapping parameter Signed-off-by: Asim Mahmood * Add changelog entry Signed-off-by: Asim Mahmood * Fix unit test Signed-off-by: Asim Mahmood * Fix changelog entry Signed-off-by: Asim Mahmood * Apply style check Signed-off-by: Asim Mahmood * Fix unit tests * since term is a basic query type, it affects many that build on top * none of these should cause a regression, infact this change should fix any possible regression these query types, which may not be copatured in big5 datasets Signed-off-by: Asim Mahmood * Fix more unit tests Signed-off-by: Asim Mahmood * Fix flakly test * due to the random 
nature of org.opensearch.index.query.SimpleQueryStringBuilderTests.testToQuery, this can sometimes fail with `but: was ` Signed-off-by: Asim Mahmood # Please enter the commit message for your changes. Lines starting * Fix style check Signed-off-by: Asim Mahmood * Change parameter to use snake_case: use_similarity Signed-off-by: Asim Mahmood * Fix integ tests * also check all places where we do `X instanceof BoostQuery` to also check for `ConstantScoreQuery` Signed-off-by: Asim Mahmood --------- Signed-off-by: Asim Mahmood Signed-off-by: Asim Mahmood # Please enter the commit message for your changes. Lines starting Signed-off-by: Asim M --- CHANGELOG.md | 1 + .../percolator/QueryBuilderStoreTests.java | 12 +++-- .../index/mapper/KeywordFieldMapper.java | 27 +++++++++- .../index/mapper/MappedFieldType.java | 3 ++ .../mapper/FlatObjectFieldTypeTests.java | 18 +++++-- .../index/mapper/KeywordFieldTypeTests.java | 21 ++++++-- .../query/MultiMatchQueryBuilderTests.java | 32 +++++++---- .../query/QueryStringQueryBuilderTests.java | 54 ++++++++++++------- .../query/SimpleQueryStringBuilderTests.java | 30 ++++++++--- .../FunctionScoreQueryBuilderTests.java | 4 +- .../index/search/MultiMatchQueryTests.java | 19 ++++--- .../index/search/NestedHelperTests.java | 17 +++--- .../search/sort/FieldSortBuilderTests.java | 5 +- 13 files changed, 178 insertions(+), 65 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a0f6ab80da3b..f98144dc068e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add `ApproximateMatchAllQuery` that targets match_all queries and approximates sorts ([#17772](https://github.com/opensearch-project/OpenSearch/pull/17772)) - Add TermsQuery support to Search GRPC endpoint ([#17888](https://github.com/opensearch-project/OpenSearch/pull/17888)) - Support sub agg in filter rewrite optimization ([#17447](https://github.com/opensearch-project/OpenSearch/pull/17447) +- Disable scoring of keyword term search by default, fallback logic with new use_similarity:true parameter ([#17889](https://github.com/opensearch-project/OpenSearch/pull/17889)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/QueryBuilderStoreTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/QueryBuilderStoreTests.java index 340d359f85523..9a708a823507f 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/QueryBuilderStoreTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/QueryBuilderStoreTests.java @@ -38,6 +38,8 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; @@ -121,9 +123,13 @@ public void testStoringQueryBuilders() throws IOException { CheckedFunction queries = queryStore.getQueries(leafContext); assertEquals(queryBuilders.length, leafContext.reader().numDocs()); for (int i = 0; i < queryBuilders.length; i++) { - TermQuery query = (TermQuery) queries.apply(i); - assertEquals(queryBuilders[i].fieldName(), query.getTerm().field()); - assertEquals(queryBuilders[i].value(), 
query.getTerm().text()); + Query query = queries.apply(i); + if (query instanceof ConstantScoreQuery constantScoreQuery) { + query = constantScoreQuery.getQuery(); + } + Term term = ((TermQuery) query).getTerm(); + assertEquals(queryBuilders[i].fieldName(), term.field()); + assertEquals(queryBuilders[i].value(), term.text()); } } } diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index b4d205f38a7b9..2f1160f6a2a6c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -40,6 +40,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MultiTermQuery; @@ -158,6 +159,7 @@ public static class Builder extends ParametrizedFieldMapper.Builder { ); private final Parameter hasNorms = TextParams.norms(false, m -> toType(m).fieldType.omitNorms() == false); private final Parameter similarity = TextParams.similarity(m -> toType(m).similarity); + private final Parameter useSimilarity = Parameter.boolParam("use_similarity", true, m -> toType(m).useSimilarity, false); private final Parameter normalizer = Parameter.stringParam("normalizer", false, m -> toType(m).normalizerName, "default"); @@ -214,6 +216,7 @@ protected List> getParameters() { indexOptions, hasNorms, similarity, + useSimilarity, normalizer, splitQueriesOnWhitespace, boost, @@ -275,6 +278,7 @@ public static class KeywordFieldType extends StringFieldType { private final int ignoreAbove; private final String nullValue; + private final boolean useSimilarity; public KeywordFieldType(String name, FieldType fieldType, NamedAnalyzer normalizer, NamedAnalyzer searchAnalyzer, Builder builder) { super( @@ -290,13 +294,19 @@ public KeywordFieldType(String name, FieldType fieldType, NamedAnalyzer normaliz setBoost(builder.boost.getValue()); this.ignoreAbove = builder.ignoreAbove.getValue(); this.nullValue = builder.nullValue.getValue(); + this.useSimilarity = builder.useSimilarity.getValue(); } public KeywordFieldType(String name, boolean isSearchable, boolean hasDocValues, Map meta) { + this(name, isSearchable, hasDocValues, false, meta); + } + + public KeywordFieldType(String name, boolean isSearchable, boolean hasDocValues, boolean useSimilarity, Map meta) { super(name, isSearchable, false, hasDocValues, TextSearchInfo.SIMPLE_MATCH_ONLY, meta); setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); this.ignoreAbove = Integer.MAX_VALUE; this.nullValue = null; + this.useSimilarity = useSimilarity; } public KeywordFieldType(String name) { @@ -314,12 +324,14 @@ public KeywordFieldType(String name, FieldType fieldType) { ); this.ignoreAbove = Integer.MAX_VALUE; this.nullValue = null; + this.useSimilarity = false; } public KeywordFieldType(String name, NamedAnalyzer analyzer) { super(name, true, false, true, new TextSearchInfo(Defaults.FIELD_TYPE, null, analyzer, analyzer), Collections.emptyMap()); this.ignoreAbove = Integer.MAX_VALUE; this.nullValue = null; + this.useSimilarity = false; } @Override @@ -423,7 +435,14 @@ public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { public Query termQuery(Object value, QueryShardContext context) { 
failIfNotIndexedAndNoDocValues(); if (isSearchable()) { - return super.termQuery(value, context); + Query query = super.termQuery(value, context); + if (!this.useSimilarity) { + query = new ConstantScoreQuery(super.termQuery(value, context)); + } + if (boost() != 1f) { + query = new BoostQuery(query, boost()); + } + return query; } else { Query query = SortedSetDocValuesField.newSlowRangeQuery( name(), @@ -703,6 +722,7 @@ public Query wildcardQuery( private final String indexOptions; private final FieldType fieldType; private final SimilarityProvider similarity; + private final boolean useSimilarity; private final String normalizerName; private final boolean splitQueriesOnWhitespace; @@ -726,6 +746,7 @@ protected KeywordFieldMapper( this.indexOptions = builder.indexOptions.getValue(); this.fieldType = fieldType; this.similarity = builder.similarity.getValue(); + this.useSimilarity = builder.useSimilarity.getValue(); this.normalizerName = builder.normalizer.getValue(); this.splitQueriesOnWhitespace = builder.splitQueriesOnWhitespace.getValue(); @@ -740,6 +761,10 @@ public int ignoreAbove() { return ignoreAbove; } + boolean useSimilarity() { + return useSimilarity; + } + @Override protected KeywordFieldMapper clone() { return (KeywordFieldMapper) super.clone(); diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index c6c89d8f981cb..a3ea6b5764913 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -497,6 +497,9 @@ public static Term extractTerm(Query termQuery) throws IOException { return new Term(tisQuery.getField(), term); } } + if (termQuery instanceof ConstantScoreQuery) { + termQuery = ((ConstantScoreQuery) termQuery).getQuery(); + } if (termQuery instanceof TermQuery == false) { throw new IllegalArgumentException("Cannot extract a term from a query of type " + termQuery.getClass() + ": " + termQuery); } diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java index f3ed5d8786521..de2b743ba42b4 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.search.AutomatonQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; @@ -182,7 +183,9 @@ public void testTermQueryCaseInsensitive() { true, false ); - Query expected = new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("field.field1=fOo"))); + Query expected = new ConstantScoreQuery( + new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("field.field1=fOo"))) + ); assertEquals(expected, ft.termQuery("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); } @@ -275,7 +278,10 @@ public void testTermQuery() { String searchFieldName = flatParentFieldType.getSearchField(); String searchValues = flatParentFieldType.rewriteSearchValue("foo"); assertEquals("foo", searchValues); - assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null)); + 
assertEquals( + new ConstantScoreQuery(new TermQuery(new Term(searchFieldName, searchValues))), + flatParentFieldType.termQuery(searchValues, null) + ); FlatObjectFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( "field.bar", @@ -289,7 +295,7 @@ public void testTermQuery() { String searchValuesDocPath = dynamicMappedFieldType.rewriteSearchValue("foo"); assertEquals("field.bar=foo", searchValuesDocPath); assertEquals( - new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)), + new ConstantScoreQuery(new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath))), dynamicMappedFieldType.termQuery("foo", null) ); } @@ -302,7 +308,7 @@ public void testTermQuery() { true, false ); - Query expected = new TermQuery(new Term("field" + VALUE_SUFFIX, new BytesRef("foo"))); + Query expected = new ConstantScoreQuery(new TermQuery(new Term("field" + VALUE_SUFFIX, new BytesRef("foo")))); assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); } @@ -314,7 +320,9 @@ public void testTermQuery() { true, false ); - Query expected = new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("field.field1=foo"))); + Query expected = new ConstantScoreQuery( + new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("field.field1=foo"))) + ); assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); } diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java index 02fd0d825c26f..f886ba3a750b0 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java @@ -43,6 +43,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; @@ -129,10 +130,16 @@ public void testTermQueryCaseInsensitive() { public void testTermQuery() { MappedFieldType ft = new KeywordFieldType("field"); - assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + assertEquals( + new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), + ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); ft = new KeywordFieldType("field", true, false, Collections.emptyMap()); - assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + assertEquals( + new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), + ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); ft = new KeywordFieldType("field", false, true, Collections.emptyMap()); Query expected = SortedSetDocValuesField.newSlowRangeQuery("field", new BytesRef("foo"), new BytesRef("foo"), true, true); @@ -147,6 +154,10 @@ public void testTermQuery() { "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", e.getMessage() ); + // backwards compatible enaled with useSimilarity=true + ft = new KeywordFieldType("field", true, false, true, Collections.emptyMap()); + assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } public void testTermQueryWithNormalizer() 
{ @@ -164,7 +175,7 @@ protected TokenStream normalize(String fieldName, TokenStream in) { } }; MappedFieldType ft = new KeywordFieldType("field", new NamedAnalyzer("my_normalizer", AnalyzerScope.INDEX, normalizer)); - assertEquals(new TermQuery(new Term("field", "foo bar")), ft.termQuery("fOo BaR", null)); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo bar"))), ft.termQuery("fOo BaR", null)); } public void testTermsQuery() { @@ -413,9 +424,9 @@ public void testWildCardQuery() { public void testNormalizeQueries() { MappedFieldType ft = new KeywordFieldType("field"); - assertEquals(new TermQuery(new Term("field", new BytesRef("FOO"))), ft.termQuery("FOO", null)); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", new BytesRef("FOO")))), ft.termQuery("FOO", null)); ft = new KeywordFieldType("field", Lucene.STANDARD_ANALYZER); - assertEquals(new TermQuery(new Term("field", new BytesRef("foo"))), ft.termQuery("FOO", null)); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", new BytesRef("foo")))), ft.termQuery("FOO", null)); } public void testFetchSourceValue() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java index 5dff6e96e0de0..bcf4fd0a6cb86 100644 --- a/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; @@ -242,7 +243,10 @@ public void testToQueryMultipleFieldsDisableDismax() throws Exception { .tieBreaker(1.0f) .toQuery(createShardContext()); Query expected = new DisjunctionMaxQuery( - List.of(new TermQuery(new Term(TEXT_FIELD_NAME, "test")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))), + List.of( + new TermQuery(new Term(TEXT_FIELD_NAME, "test")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))) + ), 1 ); assertEquals(expected, query); @@ -251,7 +255,10 @@ public void testToQueryMultipleFieldsDisableDismax() throws Exception { public void testToQueryMultipleFieldsDisMaxQuery() throws Exception { Query query = multiMatchQuery("test").field(TEXT_FIELD_NAME).field(KEYWORD_FIELD_NAME).toQuery(createShardContext()); Query expected = new DisjunctionMaxQuery( - List.of(new TermQuery(new Term(TEXT_FIELD_NAME, "test")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))), + List.of( + new TermQuery(new Term(TEXT_FIELD_NAME, "test")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))) + ), 0 ); assertEquals(expected, query); @@ -260,7 +267,10 @@ public void testToQueryMultipleFieldsDisMaxQuery() throws Exception { public void testToQueryFieldsWildcard() throws Exception { Query query = multiMatchQuery("test").field("mapped_str*").tieBreaker(1.0f).toQuery(createShardContext()); Query expected = new DisjunctionMaxQuery( - List.of(new TermQuery(new Term(TEXT_FIELD_NAME, "test")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))), + List.of( + new TermQuery(new Term(TEXT_FIELD_NAME, "test")), + new ConstantScoreQuery(new TermQuery(new 
Term(KEYWORD_FIELD_NAME, "test"))) + ), 1 ); assertEquals(expected, query); @@ -459,7 +469,7 @@ public void testDefaultField() throws Exception { DisjunctionMaxQuery expected = new DisjunctionMaxQuery( Arrays.asList( new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), - new BoostQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello")), 5.0f) + new BoostQuery(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))), 5.0f) ), 0.0f ); @@ -487,7 +497,7 @@ public void testDefaultField() throws Exception { Arrays.asList( new MatchNoDocsQuery("failed [mapped_int] query, caused by number_format_exception:[For input string: \"hello\"]"), new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), - new BoostQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello")), 5.0f) + new BoostQuery(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))), 5.0f) ), 0.0f ); @@ -531,9 +541,10 @@ public void testWithStopWords() throws Exception { new BooleanQuery.Builder().add(new TermQuery(new Term(TEXT_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) .add(new TermQuery(new Term(TEXT_FIELD_NAME, "fox")), BooleanClause.Occur.SHOULD) .build(), - new BooleanQuery.Builder().add(new TermQuery(new Term(KEYWORD_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term(KEYWORD_FIELD_NAME, "fox")), BooleanClause.Occur.SHOULD) - .build() + new BooleanQuery.Builder().add( + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "quick"))), + BooleanClause.Occur.SHOULD + ).add(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "fox"))), BooleanClause.Occur.SHOULD).build() ), 0f ); @@ -588,7 +599,10 @@ private void assertQueryWithAllFieldsWildcard(Query query) { assertEquals(9, noMatchNoDocsQueries); assertThat( disjunctionMaxQuery.getDisjuncts(), - hasItems(new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))) + hasItems( + new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))) + ) ); } diff --git a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java index 4458271124693..f0fde1a7dfedb 100644 --- a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java @@ -505,7 +505,10 @@ public void testToQueryMultipleTermsBooleanQuery() throws Exception { public void testToQueryMultipleFieldsBooleanQuery() throws Exception { Query query = queryStringQuery("test").field(TEXT_FIELD_NAME).field(KEYWORD_FIELD_NAME).toQuery(createShardContext()); Query expected = new DisjunctionMaxQuery( - List.of(new TermQuery(new Term(TEXT_FIELD_NAME, "test")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))), + List.of( + new TermQuery(new Term(TEXT_FIELD_NAME, "test")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))) + ), 0 ); assertEquals(expected, query); @@ -514,7 +517,10 @@ public void testToQueryMultipleFieldsBooleanQuery() throws Exception { public void testToQueryMultipleFieldsDisMaxQuery() throws Exception { Query query = queryStringQuery("test").field(TEXT_FIELD_NAME).field(KEYWORD_FIELD_NAME).toQuery(createShardContext()); Query expected = new DisjunctionMaxQuery( - List.of(new TermQuery(new Term(TEXT_FIELD_NAME, "test")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))), + 
List.of( + new TermQuery(new Term(TEXT_FIELD_NAME, "test")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))) + ), 0 ); assertEquals(expected, query); @@ -523,7 +529,10 @@ public void testToQueryMultipleFieldsDisMaxQuery() throws Exception { public void testToQueryFieldsWildcard() throws Exception { Query query = queryStringQuery("test").field("mapped_str*").toQuery(createShardContext()); Query expected = new DisjunctionMaxQuery( - List.of(new TermQuery(new Term(TEXT_FIELD_NAME, "test")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))), + List.of( + new TermQuery(new Term(TEXT_FIELD_NAME, "test")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))) + ), 0 ); assertEquals(expected, query); @@ -548,7 +557,7 @@ public void testToQueryDisMaxQuery() throws Exception { Query expected = new DisjunctionMaxQuery( List.of( new BoostQuery(new TermQuery(new Term(TEXT_FIELD_NAME, "test")), 2.2f), - new TermQuery(new Term(KEYWORD_FIELD_NAME, "test")) + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "test"))) ), 0 ); @@ -1013,7 +1022,7 @@ public void testToQueryTextParsing() throws IOException { ).add(new BooleanClause(new TermQuery(new Term(TEXT_FIELD_NAME, "bar")), BooleanClause.Occur.SHOULD)).build(); List disjuncts = new ArrayList<>(); disjuncts.add(bq1); - disjuncts.add(new TermQuery(new Term(KEYWORD_FIELD_NAME, "foo bar"))); + disjuncts.add(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "foo bar")))); DisjunctionMaxQuery expectedQuery = new DisjunctionMaxQuery(disjuncts, 0.0f); assertThat(query, equalTo(expectedQuery)); } @@ -1027,7 +1036,7 @@ public void testToQueryTextParsing() throws IOException { List disjuncts = new ArrayList<>(); PhraseQuery pq = new PhraseQuery.Builder().add(new Term(TEXT_FIELD_NAME, "foo")).add(new Term(TEXT_FIELD_NAME, "bar")).build(); disjuncts.add(pq); - disjuncts.add(new TermQuery(new Term(KEYWORD_FIELD_NAME, "foo bar"))); + disjuncts.add(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "foo bar")))); DisjunctionMaxQuery expectedQuery = new DisjunctionMaxQuery(disjuncts, 0.0f); assertThat(query, equalTo(expectedQuery)); } @@ -1041,7 +1050,7 @@ public void testToQueryTextParsing() throws IOException { ).add(new BooleanClause(new TermQuery(new Term(TEXT_FIELD_NAME, "bar")), BooleanClause.Occur.SHOULD)).build(); List disjuncts = new ArrayList<>(); disjuncts.add(bq1); - disjuncts.add(new TermQuery(new Term(KEYWORD_FIELD_NAME, "foo bar"))); + disjuncts.add(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "foo bar")))); DisjunctionMaxQuery disjunctionMaxQuery = new DisjunctionMaxQuery(disjuncts, 0.0f); BooleanQuery expectedQuery = new BooleanQuery.Builder().add(disjunctionMaxQuery, BooleanClause.Occur.SHOULD) .add(new TermQuery(new Term(TEXT_FIELD_NAME, "other")), BooleanClause.Occur.SHOULD) @@ -1056,12 +1065,12 @@ public void testToQueryTextParsing() throws IOException { List disjuncts1 = new ArrayList<>(); disjuncts1.add(new TermQuery(new Term(TEXT_FIELD_NAME, "foo"))); - disjuncts1.add(new TermQuery(new Term(KEYWORD_FIELD_NAME, "foo"))); + disjuncts1.add(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "foo")))); DisjunctionMaxQuery maxQuery1 = new DisjunctionMaxQuery(disjuncts1, 0.0f); List disjuncts2 = new ArrayList<>(); disjuncts2.add(new TermQuery(new Term(TEXT_FIELD_NAME, "bar"))); - disjuncts2.add(new TermQuery(new Term(KEYWORD_FIELD_NAME, "bar"))); + disjuncts2.add(new ConstantScoreQuery(new TermQuery(new 
Term(KEYWORD_FIELD_NAME, "bar")))); DisjunctionMaxQuery maxQuery2 = new DisjunctionMaxQuery(disjuncts2, 0.0f); BooleanQuery expectedQuery = new BooleanQuery.Builder().add(new BooleanClause(maxQuery1, BooleanClause.Occur.SHOULD)) @@ -1302,7 +1311,7 @@ public void testDefaultField() throws Exception { Query expected = new DisjunctionMaxQuery( Arrays.asList( new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), - new BoostQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello")), 5.0f) + new BoostQuery(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))), 5.0f) ), 0.0f ); @@ -1358,7 +1367,7 @@ public void testQuoteFieldSuffix() throws IOException { new QueryStringQueryBuilder("bar").quoteFieldSuffix("_2").field(TEXT_FIELD_NAME).doToQuery(context) ); assertEquals( - new TermQuery(new Term(KEYWORD_FIELD_NAME, "bar")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "bar"))), new QueryStringQueryBuilder("\"bar\"").quoteFieldSuffix("_2").field(TEXT_FIELD_NAME).doToQuery(context) ); @@ -1399,8 +1408,8 @@ public void testWithStopWords() throws Exception { new BooleanQuery.Builder().add(new TermQuery(new Term(TEXT_FIELD_NAME, "quick")), Occur.SHOULD) .add(new TermQuery(new Term(TEXT_FIELD_NAME, "fox")), Occur.SHOULD) .build(), - new BooleanQuery.Builder().add(new TermQuery(new Term(KEYWORD_FIELD_NAME, "quick")), Occur.SHOULD) - .add(new TermQuery(new Term(KEYWORD_FIELD_NAME, "fox")), Occur.SHOULD) + new BooleanQuery.Builder().add(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "quick"))), Occur.SHOULD) + .add(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "fox"))), Occur.SHOULD) .build() ), 0f @@ -1465,13 +1474,19 @@ public void testCrossFields() throws Exception { Query query = new QueryStringQueryBuilder("foo").analyzer("whitespace") .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .toQuery(createShardContext()); - Query expected = BlendedTermQuery.dismaxBlendedQuery(blendedTerms, 1.0f); - assertEquals(expected, query); + Query expected = new DisjunctionMaxQuery( + Arrays.asList( + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "foo"))), + BlendedTermQuery.dismaxBlendedQuery(new Term[] { blendedTerms[0] }, 1.0f) + ), + 0.0f + ); + assertEquals(expected, query); query = new QueryStringQueryBuilder("foo mapped_string:10").analyzer("whitespace") .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .toQuery(createShardContext()); - expected = new BooleanQuery.Builder().add(BlendedTermQuery.dismaxBlendedQuery(blendedTerms, 1.0f), Occur.SHOULD) + expected = new BooleanQuery.Builder().add(expected, Occur.SHOULD) .add(new TermQuery(new Term(TEXT_FIELD_NAME, "10")), Occur.SHOULD) .build(); assertEquals(expected, query); @@ -1542,7 +1557,7 @@ public void testMergeBoosts() throws IOException { .toQuery(createShardContext()); List terms = new ArrayList<>(); terms.add(new BoostQuery(new TermQuery(new Term(TEXT_FIELD_NAME, "first")), 0.075f)); - terms.add(new BoostQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "first")), 0.5f)); + terms.add(new BoostQuery(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "first"))), 0.5f)); Query expected = new DisjunctionMaxQuery(terms, 1.0f); assertEquals(expected, query); } @@ -1564,7 +1579,10 @@ private void assertQueryWithAllFieldsWildcard(Query query) { assertEquals(9, noMatchNoDocsQueries); assertThat( disjunctionMaxQuery.getDisjuncts(), - hasItems(new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))) + hasItems( 
+ new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))) + ) ); } diff --git a/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java index c838be336e844..0edd387ea9c6f 100644 --- a/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java @@ -41,6 +41,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -305,12 +306,17 @@ protected void doAssertLuceneQuery(SimpleQueryStringBuilder queryBuilder, Query for (Query disjunct : maxQuery.getDisjuncts()) { assertThat( disjunct, - either(instanceOf(TermQuery.class)).or(instanceOf(BoostQuery.class)).or(instanceOf(MatchNoDocsQuery.class)) + either(instanceOf(TermQuery.class)).or(instanceOf(BoostQuery.class)) + .or(instanceOf(MatchNoDocsQuery.class)) + .or(instanceOf(ConstantScoreQuery.class)) ); Query termQuery = disjunct; if (disjunct instanceof BoostQuery) { termQuery = ((BoostQuery) disjunct).getQuery(); } + if (termQuery instanceof ConstantScoreQuery) { + termQuery = ((ConstantScoreQuery) termQuery).getQuery(); + } if (termQuery instanceof TermQuery) { TermQuery inner = (TermQuery) termQuery; assertThat(inner.getTerm().bytes().toString(), is(inner.getTerm().bytes().toString().toLowerCase(Locale.ROOT))); @@ -330,6 +336,9 @@ protected void doAssertLuceneQuery(SimpleQueryStringBuilder queryBuilder, Query ); if (query instanceof DisjunctionMaxQuery) { for (Query disjunct : (DisjunctionMaxQuery) query) { + if (disjunct instanceof ConstantScoreQuery constantScoreQuery) { + disjunct = constantScoreQuery.getQuery(); + } assertThat(disjunct, either(instanceOf(TermQuery.class)).or(instanceOf(MatchNoDocsQuery.class))); } } @@ -624,7 +633,7 @@ public void testQuoteFieldSuffix() { createShardContext() ); assertEquals(new TermQuery(new Term(TEXT_FIELD_NAME, "bar")), parser.parse("bar")); - assertEquals(new TermQuery(new Term(KEYWORD_FIELD_NAME, "bar")), parser.parse("\"bar\"")); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "bar"))), parser.parse("\"bar\"")); // Now check what happens if the quote field does not exist settings.quoteFieldSuffix(".quote"); @@ -670,7 +679,7 @@ public void testDefaultField() throws Exception { Query expected = new DisjunctionMaxQuery( Arrays.asList( new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), - new BoostQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello")), 5.0f) + new BoostQuery(new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))), 5.0f) ), 1.0f ); @@ -736,14 +745,20 @@ public void testWithStopWords() throws Exception { .toQuery(createShardContext()); expected = new BooleanQuery.Builder().add( new DisjunctionMaxQuery( - Arrays.asList(new TermQuery(new Term(TEXT_FIELD_NAME, "quick")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "quick"))), + Arrays.asList( + new TermQuery(new Term(TEXT_FIELD_NAME, "quick")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "quick"))) + ), 1.0f ), BooleanClause.Occur.SHOULD ) .add( new DisjunctionMaxQuery( - Arrays.asList(new 
TermQuery(new Term(TEXT_FIELD_NAME, "fox")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "fox"))), + Arrays.asList( + new TermQuery(new Term(TEXT_FIELD_NAME, "fox")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "fox"))) + ), 1.0f ), BooleanClause.Occur.SHOULD @@ -834,7 +849,10 @@ private void assertQueryWithAllFieldsWildcard(Query query) { assertEquals(9, noMatchNoDocsQueries); assertThat( disjunctionMaxQuery.getDisjuncts(), - hasItems(new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))) + hasItems( + new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), + new ConstantScoreQuery(new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))) + ) ); } diff --git a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index c762fdcde146d..ab1e42385e1f9 100644 --- a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -35,6 +35,7 @@ import com.fasterxml.jackson.core.JsonParseException; import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -624,7 +625,8 @@ public void testCustomWeightFactorQueryBuilderWithFunctionScore() throws IOExcep ).toQuery(context); assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class)); FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery; - assertThat(((TermQuery) functionScoreQuery.getSubQuery()).getTerm(), equalTo(new Term(KEYWORD_FIELD_NAME, "banon"))); + ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) functionScoreQuery.getSubQuery(); + assertThat(((TermQuery) constantScoreQuery.getQuery()).getTerm(), equalTo(new Term(KEYWORD_FIELD_NAME, "banon"))); assertThat((double) (functionScoreQuery.getFunctions()[0]).getWeight(), closeTo(1.3, 0.001)); } diff --git a/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java index d43e71f0f8f07..9b59582ebb391 100644 --- a/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java +++ b/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.PhraseQuery; @@ -321,14 +322,16 @@ public void testKeywordSplitQueriesOnWhitespace() throws IOException { Query query = parser.parse(MultiMatchQueryBuilder.Type.BEST_FIELDS, fieldNames, "Foo Bar", null); DisjunctionMaxQuery expected = new DisjunctionMaxQuery( Arrays.asList( - new TermQuery(new Term("field_normalizer", "foo bar")), - new TermQuery(new Term("field", "Foo Bar")), - new BooleanQuery.Builder().add(new TermQuery(new Term("field_split", "Foo")), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("field_split", "Bar")), BooleanClause.Occur.SHOULD) - .build(), - new BooleanQuery.Builder().add(new TermQuery(new Term("field_split_normalizer", 
"foo")), BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term("field_split_normalizer", "bar")), BooleanClause.Occur.SHOULD) - .build() + new ConstantScoreQuery(new TermQuery(new Term("field_normalizer", "foo bar"))), + new ConstantScoreQuery(new TermQuery(new Term("field", "Foo Bar"))), + new BooleanQuery.Builder().add( + new ConstantScoreQuery(new TermQuery(new Term("field_split", "Foo"))), + BooleanClause.Occur.SHOULD + ).add(new ConstantScoreQuery(new TermQuery(new Term("field_split", "Bar"))), BooleanClause.Occur.SHOULD).build(), + new BooleanQuery.Builder().add( + new ConstantScoreQuery(new TermQuery(new Term("field_split_normalizer", "foo"))), + BooleanClause.Occur.SHOULD + ).add(new ConstantScoreQuery(new TermQuery(new Term("field_split_normalizer", "bar"))), BooleanClause.Occur.SHOULD).build() ), 0.0f ); diff --git a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java index 173cbf6b540a6..cc77a19755f5d 100644 --- a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java +++ b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java @@ -36,6 +36,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -347,7 +348,7 @@ public void testNested() throws IOException { query = (OpenSearchToParentBlockJoinQuery) queryBuilder.toQuery(context); // this time we do not add a filter since the inner query only matches inner docs - expectedChildQuery = new TermQuery(new Term("nested1.foo", "bar")); + expectedChildQuery = new ConstantScoreQuery(new TermQuery(new Term("nested1.foo", "bar"))); assertEquals(expectedChildQuery, query.getChildQuery()); assertFalse(new NestedHelper(mapperService).mightMatchNestedDocs(query)); @@ -360,9 +361,10 @@ public void testNested() throws IOException { query = (OpenSearchToParentBlockJoinQuery) queryBuilder.toQuery(context); // we need to add the filter again because of include_in_parent - expectedChildQuery = new BooleanQuery.Builder().add(new TermQuery(new Term("nested2.foo", "bar")), Occur.MUST) - .add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested2")), Occur.FILTER) - .build(); + expectedChildQuery = new BooleanQuery.Builder().add( + new ConstantScoreQuery(new TermQuery(new Term("nested2.foo", "bar"))), + Occur.MUST + ).add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested2")), Occur.FILTER).build(); assertEquals(expectedChildQuery, query.getChildQuery()); assertFalse(new NestedHelper(mapperService).mightMatchNestedDocs(query)); @@ -375,9 +377,10 @@ public void testNested() throws IOException { query = (OpenSearchToParentBlockJoinQuery) queryBuilder.toQuery(context); // we need to add the filter again because of include_in_root - expectedChildQuery = new BooleanQuery.Builder().add(new TermQuery(new Term("nested3.foo", "bar")), Occur.MUST) - .add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested3")), Occur.FILTER) - .build(); + expectedChildQuery = new BooleanQuery.Builder().add( + new ConstantScoreQuery(new TermQuery(new Term("nested3.foo", "bar"))), + Occur.MUST + ).add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested3")), Occur.FILTER).build(); assertEquals(expectedChildQuery, query.getChildQuery()); 
assertFalse(new NestedHelper(mapperService).mightMatchNestedDocs(query)); diff --git a/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java index ced952db555aa..8ffd4f888a117 100644 --- a/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java @@ -45,6 +45,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.sandbox.document.BigIntegerPoint; import org.apache.lucene.sandbox.document.HalfFloatPoint; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSelector; @@ -320,7 +321,7 @@ public void testBuildNested() throws IOException { XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); Nested nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new TermQuery(new Term(MAPPED_STRING_FIELDNAME, "value")), nested.getInnerQuery()); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term(MAPPED_STRING_FIELDNAME, "value"))), nested.getInnerQuery()); sortBuilder = new FieldSortBuilder("fieldName").setNestedPath("path"); sortField = sortBuilder.build(shardContextMock).field; @@ -337,7 +338,7 @@ public void testBuildNested() throws IOException { comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new TermQuery(new Term(MAPPED_STRING_FIELDNAME, "value")), nested.getInnerQuery()); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term(MAPPED_STRING_FIELDNAME, "value"))), nested.getInnerQuery()); // if nested path is missing, we omit any filter and return a SortedNumericSortField sortBuilder = new FieldSortBuilder("fieldName").setNestedFilter(QueryBuilders.termQuery(MAPPED_STRING_FIELDNAME, "value")); From 471acefc36851b639f19ea1ac1aada92fbb04cc9 Mon Sep 17 00:00:00 2001 From: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Date: Sun, 13 Apr 2025 08:40:35 +0530 Subject: [PATCH 212/550] Add agent files in deb,rpm (#17917) Signed-off-by: Rajat Gupta Co-authored-by: Rajat Gupta --- distribution/packages/build.gradle | 3 +++ 1 file changed, 3 insertions(+) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index a980c4d415045..a02f907f86aef 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -175,6 +175,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { with jdkFiles(project, 'linux', architecture) } } + into('agent') { + with agentFiles() + } // we need to specify every intermediate directory in these paths so the package managers know they are explicitly // intended to manage them; otherwise they may be left behind on uninstallation. duplicate calls of the same // directory are fine From 693c7884e2fae72f29b3d484f001f8e210195357 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 10:38:04 -0400 Subject: [PATCH 213/550] Bump com.google.code.gson:gson from 2.12.1 to 2.13.0 in /plugins/repository-gcs (#17921) * Bump com.google.code.gson:gson in /plugins/repository-gcs Bumps [com.google.code.gson:gson](https://github.com/google/gson) from 2.12.1 to 2.13.0. 
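For context, this is a minor-version bump, and the core Gson entry points (Gson#toJson and Gson#fromJson) keep the same signatures across 2.12.x and 2.13.0, so it is expected to be a drop-in update. A small round-trip sketch of that API; the DTO below is illustrative and not a type from the plugins:

import com.google.gson.Gson;

public class GsonRoundTripSketch {
    // Illustrative DTO; not a type from the OpenSearch codebase.
    static class Endpoint {
        String host;
        int port;
    }

    public static void main(String[] args) {
        Endpoint in = new Endpoint();
        in.host = "localhost";
        in.port = 9200;

        Gson gson = new Gson();
        String json = gson.toJson(in);                      // {"host":"localhost","port":9200}
        Endpoint out = gson.fromJson(json, Endpoint.class); // reflective deserialization, no annotations needed
        System.out.println(json + " -> " + out.host + ":" + out.port);
    }
}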
- [Release notes](https://github.com/google/gson/releases) - [Changelog](https://github.com/google/gson/blob/main/CHANGELOG.md) - [Commits](https://github.com/google/gson/compare/gson-parent-2.12.1...gson-parent-2.13.0) --- updated-dependencies: - dependency-name: com.google.code.gson:gson dependency-version: 2.13.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 2 +- plugins/repository-gcs/build.gradle | 2 +- plugins/repository-gcs/licenses/gson-2.12.1.jar.sha1 | 1 - plugins/repository-gcs/licenses/gson-2.13.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/gson-2.12.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gson-2.13.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index f98144dc068e0..d00f8fc73d7b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,7 +60,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.api:gax` from 2.35.0 to 2.63.1 ([#17465](https://github.com/opensearch-project/OpenSearch/pull/17465)) - Bump `com.azure:azure-storage-blob` from 12.29.1 to 12.30.0 ([#17667](https://github.com/opensearch-project/OpenSearch/pull/17667)) - Bump `tj-actions/changed-files` from 46.0.1 to 46.0.4 ([#17666](https://github.com/opensearch-project/OpenSearch/pull/17666), [#17813](https://github.com/opensearch-project/OpenSearch/pull/17813)) -- Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17668](https://github.com/opensearch-project/OpenSearch/pull/17668)) +- Bump `com.google.code.gson:gson` from 2.11.0 to 2.13.0 ([#17668](https://github.com/opensearch-project/OpenSearch/pull/17668), [#17921](https://github.com/opensearch-project/OpenSearch/pull/17921)) - Bump `com.github.luben:zstd-jni` from 1.5.5-1 to 1.5.6-1 ([#17674](https://github.com/opensearch-project/OpenSearch/pull/17674)) - Bump `lycheeverse/lychee-action` from 2.3.0 to 2.4.0 ([#17731](https://github.com/opensearch-project/OpenSearch/pull/17731)) - Bump `com.netflix.nebula.ospackage-base` from 11.11.1 to 11.11.2 ([#17734](https://github.com/opensearch-project/OpenSearch/pull/17734)) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index d6352ef99f987..038d46a53cc67 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -66,7 +66,7 @@ dependencies { api 'com.google.cloud:google-cloud-core-http:2.47.0' api 'com.google.cloud:google-cloud-storage:1.113.1' - api 'com.google.code.gson:gson:2.12.1' + api 'com.google.code.gson:gson:2.13.0' runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' diff --git a/plugins/repository-gcs/licenses/gson-2.12.1.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.12.1.jar.sha1 deleted file mode 100644 index 7d57e885daa08..0000000000000 --- a/plugins/repository-gcs/licenses/gson-2.12.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4e773a317740b83b43cfc3d652962856041697cb \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gson-2.13.0.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.13.0.jar.sha1 new file mode 100644 index 0000000000000..7cf8ab0bbe08e --- /dev/null +++ b/plugins/repository-gcs/licenses/gson-2.13.0.jar.sha1 @@ 
-0,0 +1 @@ +111ac98ad3d2d099d81d53b0549748144a8d2659 \ No newline at end of file From 0930403b7f48c31d516cd9cc6b870bc88fb2ebc5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 12:15:50 -0400 Subject: [PATCH 214/550] Bump tj-actions/changed-files from 46.0.4 to 46.0.5 (#17920) * Bump tj-actions/changed-files from 46.0.4 to 46.0.5 Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 46.0.4 to 46.0.5. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v46.0.4...v46.0.5) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-version: 46.0.5 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Craig Perkins Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Craig Perkins --- .github/workflows/gradle-check.yml | 2 +- CHANGELOG.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 8ffe395370804..22c88606d47c5 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -24,7 +24,7 @@ jobs: - uses: actions/checkout@v4 - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v46.0.4 + uses: tj-actions/changed-files@v46.0.5 with: files_ignore: | release-notes/*.md diff --git a/CHANGELOG.md b/CHANGELOG.md index d00f8fc73d7b4..3f3f720983d4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,7 +59,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `dangoslen/dependabot-changelog-helper` from 3 to 4 ([#17498](https://github.com/opensearch-project/OpenSearch/pull/17498)) - Bump `com.google.api:gax` from 2.35.0 to 2.63.1 ([#17465](https://github.com/opensearch-project/OpenSearch/pull/17465)) - Bump `com.azure:azure-storage-blob` from 12.29.1 to 12.30.0 ([#17667](https://github.com/opensearch-project/OpenSearch/pull/17667)) -- Bump `tj-actions/changed-files` from 46.0.1 to 46.0.4 ([#17666](https://github.com/opensearch-project/OpenSearch/pull/17666), [#17813](https://github.com/opensearch-project/OpenSearch/pull/17813)) +- Bump `tj-actions/changed-files` from 46.0.1 to 46.0.5 ([#17666](https://github.com/opensearch-project/OpenSearch/pull/17666), [#17813](https://github.com/opensearch-project/OpenSearch/pull/17813), [#17920](https://github.com/opensearch-project/OpenSearch/pull/17920)) - Bump `com.google.code.gson:gson` from 2.11.0 to 2.13.0 ([#17668](https://github.com/opensearch-project/OpenSearch/pull/17668), [#17921](https://github.com/opensearch-project/OpenSearch/pull/17921)) - Bump `com.github.luben:zstd-jni` from 1.5.5-1 to 1.5.6-1 ([#17674](https://github.com/opensearch-project/OpenSearch/pull/17674)) - Bump `lycheeverse/lychee-action` from 2.3.0 to 2.4.0 ([#17731](https://github.com/opensearch-project/OpenSearch/pull/17731)) From 7eeb323f4f4fd9a2657b19a1c07fd605abddb99c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 10:15:33 -0700 Subject: [PATCH 215/550] Bump com.google.code.gson:gson from 2.12.1 to 2.13.0 in 
/plugins/repository-hdfs (#17926) * Bump com.google.code.gson:gson in /plugins/repository-hdfs Bumps [com.google.code.gson:gson](https://github.com/google/gson) from 2.12.1 to 2.13.0. - [Release notes](https://github.com/google/gson/releases) - [Changelog](https://github.com/google/gson/blob/main/CHANGELOG.md) - [Commits](https://github.com/google/gson/compare/gson-parent-2.12.1...gson-parent-2.13.0) --- updated-dependencies: - dependency-name: com.google.code.gson:gson dependency-version: 2.13.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Craig Perkins Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Craig Perkins --- CHANGELOG.md | 2 +- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/gson-2.12.1.jar.sha1 | 1 - plugins/repository-hdfs/licenses/gson-2.13.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/gson-2.12.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/gson-2.13.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f3f720983d4b..9fb2f1d6a5234 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,7 +60,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.api:gax` from 2.35.0 to 2.63.1 ([#17465](https://github.com/opensearch-project/OpenSearch/pull/17465)) - Bump `com.azure:azure-storage-blob` from 12.29.1 to 12.30.0 ([#17667](https://github.com/opensearch-project/OpenSearch/pull/17667)) - Bump `tj-actions/changed-files` from 46.0.1 to 46.0.5 ([#17666](https://github.com/opensearch-project/OpenSearch/pull/17666), [#17813](https://github.com/opensearch-project/OpenSearch/pull/17813), [#17920](https://github.com/opensearch-project/OpenSearch/pull/17920)) -- Bump `com.google.code.gson:gson` from 2.11.0 to 2.13.0 ([#17668](https://github.com/opensearch-project/OpenSearch/pull/17668), [#17921](https://github.com/opensearch-project/OpenSearch/pull/17921)) +- Bump `com.google.code.gson:gson` from 2.11.0 to 2.13.0 ([#17668](https://github.com/opensearch-project/OpenSearch/pull/17668), [#17921](https://github.com/opensearch-project/OpenSearch/pull/17921)), [#17926](https://github.com/opensearch-project/OpenSearch/pull/17926)) - Bump `com.github.luben:zstd-jni` from 1.5.5-1 to 1.5.6-1 ([#17674](https://github.com/opensearch-project/OpenSearch/pull/17674)) - Bump `lycheeverse/lychee-action` from 2.3.0 to 2.4.0 ([#17731](https://github.com/opensearch-project/OpenSearch/pull/17731)) - Bump `com.netflix.nebula.ospackage-base` from 11.11.1 to 11.11.2 ([#17734](https://github.com/opensearch-project/OpenSearch/pull/17734)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 7c2488ff6b1a8..5660fb486f970 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -67,7 +67,7 @@ dependencies { api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api 'org.apache.avro:avro:1.12.0' - api 'com.google.code.gson:gson:2.12.1' + api 'com.google.code.gson:gson:2.13.0' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.9.0' diff --git 
a/plugins/repository-hdfs/licenses/gson-2.12.1.jar.sha1 b/plugins/repository-hdfs/licenses/gson-2.12.1.jar.sha1 deleted file mode 100644 index 7d57e885daa08..0000000000000 --- a/plugins/repository-hdfs/licenses/gson-2.12.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4e773a317740b83b43cfc3d652962856041697cb \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/gson-2.13.0.jar.sha1 b/plugins/repository-hdfs/licenses/gson-2.13.0.jar.sha1 new file mode 100644 index 0000000000000..7cf8ab0bbe08e --- /dev/null +++ b/plugins/repository-hdfs/licenses/gson-2.13.0.jar.sha1 @@ -0,0 +1 @@ +111ac98ad3d2d099d81d53b0549748144a8d2659 \ No newline at end of file From ddb435462333d244a30656318fa19cff42cdc294 Mon Sep 17 00:00:00 2001 From: Darshit Chanpura Date: Mon, 14 Apr 2025 16:33:25 -0400 Subject: [PATCH 216/550] Relaxes jarHell check for optionally extended plugins (#17893) * Relaxes jarHell check for optionally extended plugins Signed-off-by: Darshit Chanpura * Adds changelog entry Signed-off-by: Darshit Chanpura * Fixes Changelog entry Signed-off-by: Darshit Chanpura --------- Signed-off-by: Darshit Chanpura --- CHANGELOG.md | 1 + server/src/main/java/org/opensearch/plugins/PluginsService.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9fb2f1d6a5234..11caf7b74968a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Avoid skewed segment replication lag metric ([#17831](https://github.com/opensearch-project/OpenSearch/pull/17831)) - Increase the default segment counter step size when replica promoting ([#17568](https://github.com/opensearch-project/OpenSearch/pull/17568)) - [WLM] Rename QueryGroup to WorkloadGroup ([#17901](https://github.com/opensearch-project/OpenSearch/pull/17901)) +- Relaxes jarHell check for optionally extended plugins([#17893](https://github.com/opensearch-project/OpenSearch/pull/17893))) ### Dependencies - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607), [#17669](https://github.com/opensearch-project/OpenSearch/pull/17669)) diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index 0d522311ee649..769494204cc49 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -690,7 +690,7 @@ static void checkBundleJarHell(Set classpath, Bundle bundle, Map urls = new HashSet<>(); for (String extendedPlugin : exts) { Set pluginUrls = transitiveUrls.get(extendedPlugin); - if (pluginUrls == null && bundle.plugin.isExtendedPluginOptional(extendedPlugin)) { + if (bundle.plugin.isExtendedPluginOptional(extendedPlugin)) { continue; } assert pluginUrls != null : "transitive urls should have already been set for " + extendedPlugin; From 5799fe76afd74dee12e73080b9b8e5eadf91bb74 Mon Sep 17 00:00:00 2001 From: Vinay Krishna Pudyodu Date: Mon, 14 Apr 2025 20:33:13 -0700 Subject: [PATCH 217/550] Remove Reader Writer Split experimental feature flag (#17880) * Remove Reader Writer Split experimental feature flag Signed-off-by: Vinay Krishna Pudyodu * Fixed failing tests Signed-off-by: Vinay Krishna Pudyodu * Fixed failing unit tests Signed-off-by: Vinay Krishna Pudyodu * Added back setting updater in Search only Operation routing Signed-off-by: Vinay Krishna Pudyodu * Fixed 
SpotlessApply Signed-off-by: Vinay Krishna Pudyodu * Fixed the way roles added while creating node for testing Signed-off-by: Vinay Krishna Pudyodu * Revert "Fixed failing unit tests" This reverts commit 7aa103a5b0fc491d60d8ad0c0edace2cad63cbe3. Signed-off-by: Vinay Krishna Pudyodu * Revert "Fixed the way roles added while creating node for testing" This reverts commit 4481794fba5e8cbef3b6503096691f1717b09905. Signed-off-by: Vinay Krishna Pudyodu * Added SEARCH_ROLE to rolesToMap Signed-off-by: Vinay Krishna Pudyodu --------- Signed-off-by: Vinay Krishna Pudyodu --- .../scale/searchonly/ScaleIndexIT.java | 6 -- .../allocation/SearchReplicaAllocationIT.java | 6 -- .../SearchReplicaAwarenessAllocationIT.java | 6 -- .../metadata/AutoExpandSearchReplicasIT.java | 6 -- .../metadata/MetadataIndexStateServiceIT.java | 6 -- ...SearchReplicaReplicationAndRecoveryIT.java | 6 -- .../replication/SearchReplicaRestoreIT.java | 6 -- .../SearchOnlyReplicaFeatureFlagIT.java | 56 ------------------- .../indices/settings/SearchOnlyReplicaIT.java | 6 -- .../org/opensearch/cluster/ClusterModule.java | 5 +- .../metadata/MetadataCreateIndexService.java | 4 +- .../MetadataUpdateSettingsService.java | 5 +- .../cluster/node/DiscoveryNode.java | 3 +- .../cluster/node/DiscoveryNodeRole.java | 2 +- .../cluster/routing/OperationRouting.java | 10 +--- .../cluster/routing/ShardRouting.java | 5 +- .../common/settings/FeatureFlagSettings.java | 1 - .../common/settings/IndexScopedSettings.java | 7 ++- .../opensearch/common/util/FeatureFlags.java | 9 --- .../org/opensearch/index/IndexService.java | 9 +-- .../cluster/ClusterModuleTests.java | 2 + .../opensearch/cluster/ClusterStateTests.java | 6 ++ .../metadata/SearchOnlyReplicaTests.java | 9 --- .../routing/OperationRoutingTests.java | 5 +- .../UpdateNumberOfReplicasTests.java | 8 ++- .../SearchReplicaAllocationDeciderTests.java | 21 ++++++- ...SearchReplicaAwarenessAllocationTests.java | 12 +--- .../common/settings/ScopedSettingsTests.java | 5 +- .../opensearch/index/IndexServiceTests.java | 9 --- .../cluster/OpenSearchAllocationTestCase.java | 4 ++ 30 files changed, 60 insertions(+), 185 deletions(-) delete mode 100644 server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaFeatureFlagIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java index 06c6c6074fc47..d312b3b495692 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/scale/searchonly/ScaleIndexIT.java @@ -18,7 +18,6 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; @@ -41,11 +40,6 @@ public class ScaleIndexIT extends RemoteStoreBaseIntegTestCase { private static final String TEST_INDEX = "test_scale_index"; - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); - } - public Settings indexSettings() { 
return Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAllocationIT.java index 0ede555098834..594a225d1f3c9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAllocationIT.java @@ -12,7 +12,6 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; @@ -25,11 +24,6 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SearchReplicaAllocationIT extends RemoteStoreBaseIntegTestCase { - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); - } - public void testSearchReplicaAllocatedToDedicatedSearchNode() { internalCluster().startClusterManagerOnlyNode(); String primaryNode = internalCluster().startDataOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAwarenessAllocationIT.java index 3e610df1887ed..2cd99c120c454 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaAwarenessAllocationIT.java @@ -19,7 +19,6 @@ import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; @@ -40,11 +39,6 @@ public class SearchReplicaAwarenessAllocationIT extends RemoteStoreBaseIntegTest private final Logger logger = LogManager.getLogger(SearchReplicaAwarenessAllocationIT.class); - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); - } - public void testAllocationAwarenessZones() { Settings commonSettings = Settings.builder() .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b") diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasIT.java index c177b01fea642..abd29b256f09e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/AutoExpandSearchReplicasIT.java @@ -11,7 +11,6 @@ import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.InternalTestCluster; @@ -22,11 +21,6 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class AutoExpandSearchReplicasIT extends RemoteStoreBaseIntegTestCase { - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); - } - public void testAutoExpandSearchReplica() throws Exception { String indexName = "test"; internalCluster().startClusterManagerOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java index b914dbff03a42..82b6eebc31504 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceIT.java @@ -11,7 +11,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; @@ -28,11 +27,6 @@ public class MetadataIndexStateServiceIT extends RemoteStoreBaseIntegTestCase { private static final String TEST_INDEX = "test_open_close_index"; - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); - } - public void testIndexCloseAndOpen() throws Exception { internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNodes(2); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java index 3d09c0ef7ef25..0ba27e44e5556 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java @@ -21,7 +21,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.indices.recovery.RecoveryState; @@ -72,11 +71,6 @@ public Settings indexSettings() { .build(); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build(); - } - public void testReplication() throws 
Exception { internalCluster().startClusterManagerOnlyNode(); final String primary = internalCluster().startDataOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java index cfed17cdf4967..73b64bec69493 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java @@ -16,7 +16,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteSnapshotIT; @@ -39,11 +38,6 @@ public class SearchReplicaRestoreIT extends RemoteSnapshotIT { private static final String FS_REPOSITORY_TYPE = "fs"; private static final int DOC_COUNT = 10; - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build(); - } - public void testSearchReplicaRestore_WhenSnapshotOnSegRep_RestoreOnDocRepWithSearchReplica() throws Exception { bootstrapIndexWithOutSearchReplicas(ReplicationType.SEGMENT); createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaFeatureFlagIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaFeatureFlagIT.java deleted file mode 100644 index e5a05c04fa7ee..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaFeatureFlagIT.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.indices.settings; - -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.settings.SettingsException; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.test.OpenSearchIntegTestCase; - -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 1) -public class SearchOnlyReplicaFeatureFlagIT extends OpenSearchIntegTestCase { - - private static final String TEST_INDEX = "test_index"; - - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.FALSE) - .build(); - } - - public void testCreateFeatureFlagDisabled() { - Settings settings = Settings.builder().put(indexSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, false).build(); - SettingsException settingsException = expectThrows(SettingsException.class, () -> createIndex(TEST_INDEX, settings)); - assertTrue(settingsException.getMessage().contains("unknown setting")); - } - - public void testUpdateFeatureFlagDisabled() { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .build(); - - createIndex(TEST_INDEX, settings); - SettingsException settingsException = expectThrows(SettingsException.class, () -> { - client().admin() - .indices() - .prepareUpdateSettings(TEST_INDEX) - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)) - .get(); - }); - assertTrue(settingsException.getMessage().contains("unknown setting")); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java index 0e6d425de1a9f..11eb78d7dd7cf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java @@ -18,7 +18,6 @@ import org.opensearch.cluster.routing.Preference; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; @@ -40,11 +39,6 @@ public class SearchOnlyReplicaIT extends RemoteStoreBaseIntegTestCase { private static final String TEST_INDEX = "test_index"; - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, Boolean.TRUE).build(); - } - private final String expectedFailureMessage = "To set index.number_of_search_replicas, index.replication.type must be set to SEGMENT"; @Override diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index 6466726836f0a..baa71cf04e8d6 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ 
b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -87,7 +87,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.set.Sets; import org.opensearch.core.ParseField; @@ -391,9 +390,7 @@ public static Collection createAllocationDeciders( addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider()); addAllocationDecider(deciders, new RestoreInProgressAllocationDecider()); addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings)); - if (FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.get(settings)) { - addAllocationDecider(deciders, new SearchReplicaAllocationDecider()); - } + addAllocationDecider(deciders, new SearchReplicaAllocationDecider()); addAllocationDecider(deciders, new SameShardAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings)); addAllocationDecider(deciders, new ThrottlingAllocationDecider(settings, clusterSettings)); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 76b2b948ca164..f4cc14bee7c2e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1092,9 +1092,7 @@ static Settings aggregateIndexSettings( validateRefreshIntervalSettings(request.settings(), clusterSettings); validateTranslogFlushIntervalSettingsForCompositeIndex(request.settings(), clusterSettings); validateTranslogDurabilitySettings(request.settings(), clusterSettings, settings); - if (FeatureFlags.isEnabled(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING)) { - validateSearchOnlyReplicasSettings(indexSettings); - } + validateSearchOnlyReplicasSettings(indexSettings); validateIndexTotalPrimaryShardsPerNodeSetting(indexSettings); return indexSettings; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index 8eff5604045bc..6bc7f5a865d25 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -58,7 +58,6 @@ import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; @@ -296,9 +295,7 @@ public ClusterState execute(ClusterState currentState) { } if (IndexMetadata.INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.exists(openSettings)) { - if (FeatureFlags.isEnabled(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING)) { - validateSearchReplicaCountSettings(normalizedSettings, request.indices(), currentState); - } + validateSearchReplicaCountSettings(normalizedSettings, request.indices(), currentState); final int updatedNumberOfSearchReplicas = IndexMetadata.INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(openSettings); if 
(preserveExisting == false) { for (Index index : request.indices()) { diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index eceb076dfc926..f3c0079b6b7b7 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -607,7 +607,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } private static Map rolesToMap(final Stream roles) { - return Collections.unmodifiableMap(roles.collect(Collectors.toMap(DiscoveryNodeRole::roleName, Function.identity()))); + Stream rolesWithSearch = Stream.concat(roles, Stream.of(DiscoveryNodeRole.SEARCH_ROLE)); + return Collections.unmodifiableMap(rolesWithSearch.collect(Collectors.toMap(DiscoveryNodeRole::roleName, Function.identity()))); } private static Map roleMap = rolesToMap(DiscoveryNodeRole.BUILT_IN_ROLES.stream()); diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 62fb93eb4c41b..3dc86be816b02 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -342,7 +342,7 @@ public void validateRole(List roles) { * The built-in node roles. */ public static SortedSet BUILT_IN_ROLES = Collections.unmodifiableSortedSet( - new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, WARM_ROLE, SEARCH_ROLE)) + new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, WARM_ROLE)) ); /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 21087094d2be0..ada58fed0d12a 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -133,7 +133,6 @@ public class OperationRouting { private volatile boolean isStrictWeightedShardRouting; private volatile boolean ignoreWeightedRouting; private volatile boolean isStrictSearchOnlyShardRouting; - private final boolean isReaderWriterSplitEnabled; public OperationRouting(Settings settings, ClusterSettings clusterSettings) { // whether to ignore awareness attributes when routing requests @@ -156,7 +155,6 @@ public OperationRouting(Settings settings, ClusterSettings clusterSettings) { clusterSettings.addSettingsUpdateConsumer(STRICT_WEIGHTED_SHARD_ROUTING_ENABLED, this::setStrictWeightedShardRouting); clusterSettings.addSettingsUpdateConsumer(IGNORE_WEIGHTED_SHARD_ROUTING, this::setIgnoreWeightedRouting); clusterSettings.addSettingsUpdateConsumer(STRICT_SEARCH_REPLICA_ROUTING_ENABLED, this::setStrictSearchOnlyShardRouting); - this.isReaderWriterSplitEnabled = FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.get(settings); } void setUseAdaptiveReplicaSelection(boolean useAdaptiveReplicaSelection) { @@ -277,11 +275,9 @@ public GroupShardsIterator searchShards( preference = Preference.PRIMARY_FIRST.type(); } - if (isReaderWriterSplitEnabled) { - if (preference == null || preference.isEmpty()) { - if (indexMetadataForShard.getNumberOfSearchOnlyReplicas() > 0 && isStrictSearchOnlyShardRouting) { - preference = Preference.SEARCH_REPLICA.type(); - } + if (preference == null || 
preference.isEmpty()) { + if (indexMetadataForShard.getNumberOfSearchOnlyReplicas() > 0 && isStrictSearchOnlyShardRouting) { + preference = Preference.SEARCH_REPLICA.type(); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java index bdc98061f2fa4..d79f21caf42f4 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java @@ -38,7 +38,6 @@ import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -912,9 +911,7 @@ public String shortSummary() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { XContentBuilder fieldBuilder = builder.startObject().field("state", state()).field("primary", primary()); - if (FeatureFlags.isEnabled(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL)) { - fieldBuilder.field("searchOnly", isSearchOnly()); - } + fieldBuilder.field("searchOnly", isSearchOnly()); fieldBuilder.field("node", currentNodeId()) .field("relocating_node", relocatingNodeId()) .field("shard", id()) diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index ba35a8bd1133a..962e51b53f5fd 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -37,7 +37,6 @@ protected FeatureFlagSettings( FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, FeatureFlags.STAR_TREE_INDEX_SETTING, FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, - FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, FeatureFlags.TERM_VERSION_PRECOMMIT_ENABLE_SETTING, FeatureFlags.ARROW_STREAMS_SETTING ); diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 5353ad9776522..861d2bcdad9d2 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -270,6 +270,9 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetadata.INGESTION_SOURCE_PARAMS_SETTING, IndexMetadata.INGESTION_SOURCE_ERROR_STRATEGY_SETTING, + // Settings for search replica + IndexMetadata.INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING, + // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { Map groups = s.getAsGroups(); @@ -295,9 +298,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { public static final Map> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( FeatureFlags.WRITABLE_WARM_INDEX_EXPERIMENTAL_FLAG, // TODO: Create a separate feature flag for hot tiering index state. 
- List.of(IndexModule.INDEX_STORE_LOCALITY_SETTING, IndexModule.INDEX_TIERING_STATE, IndexModule.IS_WARM_INDEX_SETTING), - FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, - List.of(IndexMetadata.INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING) + List.of(IndexModule.INDEX_STORE_LOCALITY_SETTING, IndexModule.INDEX_TIERING_STATE, IndexModule.IS_WARM_INDEX_SETTING) ); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 70461e711679e..1a1a28dc9def8 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -75,8 +75,6 @@ public class FeatureFlags { */ public static final String BACKGROUND_TASK_EXECUTION_EXPERIMENTAL = FEATURE_FLAG_PREFIX + "task.background.enabled"; - public static final String READER_WRITER_SPLIT_EXPERIMENTAL = FEATURE_FLAG_PREFIX + "read.write.split.enabled"; - public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( REMOTE_STORE_MIGRATION_EXPERIMENTAL, false, @@ -99,12 +97,6 @@ public class FeatureFlags { Property.NodeScope ); - public static final Setting READER_WRITER_SPLIT_EXPERIMENTAL_SETTING = Setting.boolSetting( - READER_WRITER_SPLIT_EXPERIMENTAL, - false, - Property.NodeScope - ); - /** * Gates the functionality of star tree index, which improves the performance of search * aggregations. @@ -155,7 +147,6 @@ static class FeatureFlagsImpl { APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING.getDefault(Settings.EMPTY) ); - put(READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.getDefault(Settings.EMPTY)); put(TERM_VERSION_PRECOMMIT_ENABLE_SETTING, TERM_VERSION_PRECOMMIT_ENABLE_SETTING.getDefault(Settings.EMPTY)); put(ARROW_STREAMS_SETTING, ARROW_STREAMS_SETTING.getDefault(Settings.EMPTY)); put( diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index de269f1676f1c..b455ad18c7227 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -136,7 +136,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.common.collect.MapBuilder.newMapBuilder; -import static org.opensearch.common.util.FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING; import static org.opensearch.index.remote.RemoteMigrationIndexMetadataUpdater.indexHasRemoteStoreSettings; /** @@ -318,9 +317,7 @@ public IndexService( this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); } - if (READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.get(indexSettings.getNodeSettings())) { - this.asyncReplicationTask = new AsyncReplicationTask(this); - } + this.asyncReplicationTask = new AsyncReplicationTask(this); this.translogFactorySupplier = translogFactorySupplier; this.recoverySettings = recoverySettings; this.remoteStoreSettings = remoteStoreSettings; @@ -1110,9 +1107,7 @@ public synchronized void updateMetadata(final IndexMetadata currentIndexMetadata } onRefreshIntervalChange(); updateFsyncTaskIfNecessary(); - if 
(READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.get(indexSettings.getNodeSettings())) { - updateReplicationTask(); - } + updateReplicationTask(); } metadataListeners.forEach(c -> c.accept(newIndexMetadata)); diff --git a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java index d590df6575680..8f888cedd5819 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java @@ -57,6 +57,7 @@ import org.opensearch.cluster.routing.allocation.decider.ResizeAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.RestoreInProgressAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.TargetPoolAllocationDecider; @@ -249,6 +250,7 @@ public void testAllocationDeciderOrder() { SnapshotInProgressAllocationDecider.class, RestoreInProgressAllocationDecider.class, FilterAllocationDecider.class, + SearchReplicaAllocationDecider.class, SameShardAllocationDecider.class, DiskThresholdDecider.class, ThrottlingAllocationDecider.class, diff --git a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java index 457bdac1809ef..06c4d2ce05b06 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java @@ -290,6 +290,7 @@ public void testToXContent() throws IOException { + " {\n" + " \"state\" : \"STARTED\",\n" + " \"primary\" : true,\n" + + " \"searchOnly\" : false,\n" + " \"node\" : \"nodeId2\",\n" + " \"relocating_node\" : null,\n" + " \"shard\" : 1,\n" @@ -312,6 +313,7 @@ public void testToXContent() throws IOException { + " {\n" + " \"state\" : \"STARTED\",\n" + " \"primary\" : true,\n" + + " \"searchOnly\" : false,\n" + " \"node\" : \"nodeId2\",\n" + " \"relocating_node\" : null,\n" + " \"shard\" : 1,\n" @@ -490,6 +492,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti + " {\n" + " \"state\" : \"STARTED\",\n" + " \"primary\" : true,\n" + + " \"searchOnly\" : false,\n" + " \"node\" : \"nodeId2\",\n" + " \"relocating_node\" : null,\n" + " \"shard\" : 1,\n" @@ -512,6 +515,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti + " {\n" + " \"state\" : \"STARTED\",\n" + " \"primary\" : true,\n" + + " \"searchOnly\" : false,\n" + " \"node\" : \"nodeId2\",\n" + " \"relocating_node\" : null,\n" + " \"shard\" : 1,\n" @@ -697,6 +701,7 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti + " {\n" + " \"state\" : \"STARTED\",\n" + " \"primary\" : true,\n" + + " \"searchOnly\" : false,\n" + " \"node\" : \"nodeId2\",\n" + " \"relocating_node\" : null,\n" + " \"shard\" : 1,\n" @@ -719,6 +724,7 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti + " {\n" + " \"state\" : \"STARTED\",\n" + " \"primary\" : true,\n" + + " \"searchOnly\" : false,\n" + " \"node\" : \"nodeId2\",\n" + " \"relocating_node\" : null,\n" + " \"shard\" : 1,\n" diff --git 
a/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java b/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java index da63222a2786e..49fbd570734f0 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java @@ -21,7 +21,6 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.common.ValidationException; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.env.Environment; import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.indices.ShardLimitValidator; @@ -73,14 +72,6 @@ public void tearDown() throws Exception { terminate(threadPool); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.getKey(), true) - .build(); - } - public void testCreateWithDefaultSearchReplicasSetting() { final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); ClusterState state = createIndexWithSettings(cluster, Settings.builder().build()); diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index f0fc3dd57749d..f6c179373793a 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -45,7 +45,6 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -1132,7 +1131,7 @@ public void testSearchReplicaDefaultRouting() throws Exception { try { OperationRouting opRouting = new OperationRouting( - Settings.builder().put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, "true").build(), + Settings.builder().build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); @@ -1208,7 +1207,7 @@ public void testSearchReplicaRoutingWhenSearchOnlyStrictSettingIsFalse() throws try { OperationRouting opRouting = new OperationRouting( - Settings.builder().put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, "true").build(), + Settings.builder().build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); opRouting.setStrictSearchOnlyShardRouting(false); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java index 5082d6ab0a37c..4cb53def99e6a 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java @@ -230,7 +230,7 @@ public void testUpdateNumberOfReplicasDoesNotImpactSearchReplicas() { logger.info("Adding two nodes and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))) + 
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newSearchNode("node3"))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); @@ -354,7 +354,7 @@ public void testUpdateSearchReplicasDoesNotImpactRegularReplicas() { logger.info("Adding three nodes and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newSearchNode("node3"))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); @@ -407,7 +407,9 @@ public void testUpdateSearchReplicasDoesNotImpactRegularReplicas() { assertEquals(shardRoutingTable.searchOnlyReplicas().get(1).state(), UNASSIGNED); logger.info("Add another node and start the added replica"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newSearchNode("node4"))) + .build(); newState = strategy.reroute(clusterState, "reroute"); newState = startInitializingShardsAndReroute(strategy, newState); assertNotEquals(newState, clusterState); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java index 584d22794024c..6adc0562d0018 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java @@ -161,7 +161,9 @@ public void testSearchReplicaWithThrottlingDecider_PrimaryBasedReplication() { ClusterState clusterState = initializeClusterStateWithSingleIndexAndShard(newNode("node1"), metadata, gatewayAllocator); clusterState = strategy.reroute(clusterState, "reroute"); clusterState = startInitializingShardsAndReroute(strategy, clusterState); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newSearchNode("node2"))) + .build(); clusterState = strategy.reroute(clusterState, "reroute"); clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertEquals(2, clusterState.routingTable().shardsWithState(STARTED).size()); @@ -215,7 +217,7 @@ public void testSearchReplicaWithThrottlingDeciderWithoutPrimary_RemoteStoreEnab clusterState = strategy.reroute(clusterState, "reroute"); clusterState = startInitializingShardsAndReroute(strategy, clusterState); - DiscoveryNode node2 = newRemoteNode("node2"); + DiscoveryNode node2 = newRemoteSearchNode("node2"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(node2)).build(); clusterState = strategy.reroute(clusterState, "reroute"); clusterState = startInitializingShardsAndReroute(strategy, clusterState); @@ -305,4 +307,19 @@ private static DiscoveryNode newRemoteNode(String name) { ) ); } + + private static DiscoveryNode newRemoteSearchNode(String name) { + return newSearchNode( + name, + name, + Map.of( + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + "cluster-repo", 
+ REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + "segment-repo", + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + "translog-repo" + ) + ); + } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAwarenessAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAwarenessAllocationTests.java index b757d5911d204..93d2c37039d44 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAwarenessAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAwarenessAllocationTests.java @@ -23,7 +23,6 @@ import org.opensearch.cluster.routing.allocation.command.AllocationCommands; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import java.util.Comparator; import java.util.List; @@ -41,10 +40,7 @@ public class SearchReplicaAwarenessAllocationTests extends OpenSearchAllocationT public void testAllocationAwarenessForIndexWithSearchReplica() { AllocationService strategy = createAllocationService( - Settings.builder() - .put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.getKey(), true) - .put("cluster.routing.allocation.awareness.attributes", "zone") - .build() + Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build() ); logger.info("--> Building initial routing table"); @@ -122,10 +118,7 @@ public void testAllocationAwarenessForIndexWithSearchReplica() { public void testMoveShardOnceNewNodeWithOutAwarenessAttributeAdded() { AllocationService strategy = createAllocationService( - Settings.builder() - .put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.getKey(), true) - .put("cluster.routing.allocation.awareness.attributes", "zone") - .build() + Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build() ); logger.info("--> Building initial routing table'"); @@ -195,7 +188,6 @@ public void testMoveShardOnceNewNodeWithOutAwarenessAttributeAdded() { public void testFullAwarenessWithSearchReplica() { AllocationService strategy = createAllocationService( Settings.builder() - .put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.getKey(), true) .put("cluster.routing.allocation.awareness.attributes", "zone") .put("cluster.routing.allocation.awareness.force.zone.values", "a,b") .build() diff --git a/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java index 55e3cfa34040b..839549b96d0f0 100644 --- a/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java @@ -994,7 +994,10 @@ public void testValidateWithSuggestion() { SettingsException.class, () -> settings.validate(Settings.builder().put("index.numbe_of_replica", "1").build(), false) ); - assertEquals(e.getMessage(), "unknown setting [index.numbe_of_replica] did you mean [index.number_of_replicas]?"); + assertEquals( + e.getMessage(), + "unknown setting [index.numbe_of_replica] did you mean any of [index.number_of_replicas, index.number_of_search_replicas]?" 
+ ); } public void testValidate() { diff --git a/server/src/test/java/org/opensearch/index/IndexServiceTests.java b/server/src/test/java/org/opensearch/index/IndexServiceTests.java index b2db510477a34..b03a915f31f84 100644 --- a/server/src/test/java/org/opensearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexServiceTests.java @@ -41,7 +41,6 @@ import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -736,14 +735,6 @@ protected void runInternal() { } } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.getKey(), true) - .build(); - } - private static String createTestMapping(String type) { return " \"properties\": {\n" + " \"test\": {\n" diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java index f0e4502787b28..c2000f5e24684 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java @@ -174,6 +174,10 @@ protected static DiscoveryNode newSearchNode(String nodeId, Map return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), attributes, SEARCH_ROLE, Version.CURRENT); } + protected static DiscoveryNode newSearchNode(String nodeName, String nodeId, Map attributes) { + return new DiscoveryNode(nodeName, nodeId, buildNewFakeTransportAddress(), attributes, SEARCH_ROLE, Version.CURRENT); + } + protected static DiscoveryNode newNode(String nodeName, String nodeId, Map attributes) { return new DiscoveryNode(nodeName, nodeId, buildNewFakeTransportAddress(), attributes, CLUSTER_MANAGER_DATA_ROLES, Version.CURRENT); } From 1be3e46ba4947dcfe849e7753ec6feed6caaffbc Mon Sep 17 00:00:00 2001 From: Varun Bharadwaj Date: Mon, 14 Apr 2025 22:04:36 -0700 Subject: [PATCH 218/550] Support versioning in pull-based ingestion (#17918) Signed-off-by: Varun Bharadwaj --- CHANGELOG.md | 1 + .../plugin/kafka/KafkaIngestionBaseIT.java | 13 ++ .../plugin/kafka/RemoteStoreKafkaIT.java | 118 +++++++++++++++ .../index/engine/IngestionEngine.java | 143 +++++++++++++++--- .../index/engine/InternalEngine.java | 14 +- .../MessageProcessorRunnable.java | 25 ++- 6 files changed, 286 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 11caf7b74968a..60371158d4933 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add TermsQuery support to Search GRPC endpoint ([#17888](https://github.com/opensearch-project/OpenSearch/pull/17888)) - Support sub agg in filter rewrite optimization ([#17447](https://github.com/opensearch-project/OpenSearch/pull/17447) - Disable scoring of keyword term search by default, fallback logic with new use_similarity:true parameter ([#17889](https://github.com/opensearch-project/OpenSearch/pull/17889)) +- Add versioning support in pull-based ingestion ([#17918](https://github.com/opensearch-project/OpenSearch/pull/17918)) ### Changed - Migrate BC libs to their FIPS 
counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java index eb118c7bdbfce..e037b08cd4ef4 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/KafkaIngestionBaseIT.java @@ -108,6 +108,19 @@ protected void produceData(String id, String name, String age, long timestamp, S producer.send(new ProducerRecord<>(topicName, null, timestamp, "null", payload)); } + protected void produceDataWithExternalVersion(String id, long version, String name, String age, long timestamp, String opType) { + String payload = String.format( + Locale.ROOT, + "{\"_id\":\"%s\", \"_version\":\"%d\", \"_op_type\":\"%s\",\"_source\":{\"name\":\"%s\", \"age\": %s}}", + id, + version, + opType, + name, + age + ); + producer.send(new ProducerRecord<>(topicName, null, timestamp, "null", payload)); + } + protected void produceData(String payload) { producer.send(new ProducerRecord<>(topicName, null, defaultMessageTimestamp, "null", payload)); } diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java index 54adeaa1396e5..4ab3bda474d10 100644 --- a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/RemoteStoreKafkaIT.java @@ -19,7 +19,9 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.transport.client.Requests; @@ -310,6 +312,122 @@ public void testPaginatedGetIngestionState() throws ExecutionException, Interrup })); } + public void testExternalVersioning() throws Exception { + // setup nodes and index + produceDataWithExternalVersion("1", 1, "name1", "25", defaultMessageTimestamp, "index"); + produceDataWithExternalVersion("2", 1, "name2", "25", defaultMessageTimestamp, "index"); + internalCluster().startClusterManagerOnlyNode(); + final String nodeA = internalCluster().startDataOnlyNode(); + final String nodeB = internalCluster().startDataOnlyNode(); + + createIndexWithDefaultSettings(1, 1); + ensureGreen(indexName); + waitForSearchableDocs(2, Arrays.asList(nodeA, nodeB)); + + // validate next version docs get indexed + produceDataWithExternalVersion("1", 2, "name1", "30", defaultMessageTimestamp, "index"); + produceDataWithExternalVersion("2", 2, "name2", "30", defaultMessageTimestamp, "index"); + waitForState(() -> { + BoolQueryBuilder query1 = new BoolQueryBuilder().must(new TermQueryBuilder("_id", 1)); + SearchResponse response1 = client().prepareSearch(indexName).setQuery(query1).get(); + assertThat(response1.getHits().getTotalHits().value(), is(1L)); + BoolQueryBuilder query2 = new 
BoolQueryBuilder().must(new TermQueryBuilder("_id", 2)); + SearchResponse response2 = client().prepareSearch(indexName).setQuery(query2).get(); + assertThat(response2.getHits().getTotalHits().value(), is(1L)); + return 30 == (Integer) response1.getHits().getHits()[0].getSourceAsMap().get("age") + && 30 == (Integer) response2.getHits().getHits()[0].getSourceAsMap().get("age"); + }); + + // test out-of-order updates + produceDataWithExternalVersion("1", 1, "name1", "25", defaultMessageTimestamp, "index"); + produceDataWithExternalVersion("2", 1, "name2", "25", defaultMessageTimestamp, "index"); + produceDataWithExternalVersion("3", 1, "name3", "25", defaultMessageTimestamp, "index"); + waitForSearchableDocs(3, Arrays.asList(nodeA, nodeB)); + + BoolQueryBuilder query1 = new BoolQueryBuilder().must(new TermQueryBuilder("_id", 1)); + SearchResponse response1 = client().prepareSearch(indexName).setQuery(query1).get(); + assertThat(response1.getHits().getTotalHits().value(), is(1L)); + assertEquals(30, response1.getHits().getHits()[0].getSourceAsMap().get("age")); + + BoolQueryBuilder query2 = new BoolQueryBuilder().must(new TermQueryBuilder("_id", 2)); + SearchResponse response2 = client().prepareSearch(indexName).setQuery(query2).get(); + assertThat(response2.getHits().getTotalHits().value(), is(1L)); + assertEquals(30, response2.getHits().getHits()[0].getSourceAsMap().get("age")); + + // test deletes with smaller version + produceDataWithExternalVersion("1", 1, "name1", "25", defaultMessageTimestamp, "delete"); + produceDataWithExternalVersion("4", 1, "name4", "25", defaultMessageTimestamp, "index"); + waitForSearchableDocs(4, Arrays.asList(nodeA, nodeB)); + RangeQueryBuilder query = new RangeQueryBuilder("age").gte(23); + SearchResponse response = client().prepareSearch(indexName).setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(4L)); + + // test deletes with correct version + produceDataWithExternalVersion("1", 3, "name1", "30", defaultMessageTimestamp, "delete"); + produceDataWithExternalVersion("2", 3, "name2", "30", defaultMessageTimestamp, "delete"); + waitForState(() -> { + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("age").gte(23); + SearchResponse rangeQueryResponse = client().prepareSearch(indexName).setQuery(rangeQuery).get(); + assertThat(rangeQueryResponse.getHits().getTotalHits().value(), is(2L)); + return true; + }); + } + + public void testExternalVersioningWithDisabledGCDeletes() throws Exception { + // setup nodes and index + internalCluster().startClusterManagerOnlyNode(); + final String nodeA = internalCluster().startDataOnlyNode(); + final String nodeB = internalCluster().startDataOnlyNode(); + + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("ingestion_source.type", "kafka") + .put("ingestion_source.pointer.init.reset", "earliest") + .put("ingestion_source.param.topic", topicName) + .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers()) + .put("index.replication.type", "SEGMENT") + .put("index.gc_deletes", "0") + .build(), + "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}}" + ); + + // insert documents + produceDataWithExternalVersion("1", 1, "name1", "25", defaultMessageTimestamp, "index"); + produceDataWithExternalVersion("2", 1, "name2", "25", defaultMessageTimestamp, "index"); + waitForState(() -> { + RangeQueryBuilder rangeQuery = new 
RangeQueryBuilder("age").gte(23); + SearchResponse rangeQueryResponse = client().prepareSearch(indexName).setQuery(rangeQuery).get(); + assertThat(rangeQueryResponse.getHits().getTotalHits().value(), is(2L)); + return true; + }); + + // delete documents 1 and 2 + produceDataWithExternalVersion("1", 2, "name1", "25", defaultMessageTimestamp, "delete"); + produceDataWithExternalVersion("2", 2, "name2", "25", defaultMessageTimestamp, "delete"); + produceDataWithExternalVersion("3", 1, "name3", "25", defaultMessageTimestamp, "index"); + waitForState(() -> { + BoolQueryBuilder query = new BoolQueryBuilder().must(new TermQueryBuilder("_id", 3)); + SearchResponse response = client().prepareSearch(indexName).setQuery(query).get(); + assertThat(response.getHits().getTotalHits().value(), is(1L)); + return 25 == (Integer) response.getHits().getHits()[0].getSourceAsMap().get("age"); + }); + waitForSearchableDocs(1, Arrays.asList(nodeA, nodeB)); + + // validate index operation with lower version creates new document + produceDataWithExternalVersion("1", 1, "name1", "35", defaultMessageTimestamp, "index"); + produceDataWithExternalVersion("4", 1, "name4", "35", defaultMessageTimestamp, "index"); + waitForState(() -> { + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("age").gte(34); + SearchResponse rangeQueryResponse = client().prepareSearch(indexName).setQuery(rangeQuery).get(); + assertThat(rangeQueryResponse.getHits().getTotalHits().value(), is(2L)); + return true; + }); + + } + private void verifyRemoteStoreEnabled(String node) { GetSettingsResponse settingsResponse = client(node).admin().indices().prepareGetSettings(indexName).get(); String remoteStoreEnabled = settingsResponse.getIndexToSettings().get(indexName).get("index.remote_store.enabled"); diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java index bd17ee2170121..f3e613621a053 100644 --- a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java @@ -16,10 +16,14 @@ import org.opensearch.action.admin.indices.streamingingestion.state.ShardIngestionState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IngestionSource; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.index.IngestionConsumerFactory; import org.opensearch.index.IngestionShardConsumer; import org.opensearch.index.IngestionShardPointer; +import org.opensearch.index.VersionType; import org.opensearch.index.mapper.DocumentMapperForType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.ParseContext; @@ -47,6 +51,7 @@ import java.util.function.BiFunction; import static org.opensearch.action.index.IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; +import static org.opensearch.index.translog.Translog.EMPTY_TRANSLOG_LOCATION; import static org.opensearch.index.translog.Translog.EMPTY_TRANSLOG_SNAPSHOT; /** @@ -157,15 +162,45 @@ public IndexResult index(Index index) throws IOException { /** * Indexes the document into the engine. This is used internally by the stream poller only. 
* @param index the index request - * @return the index result * @throws IOException if an error occurs */ - public IndexResult indexInternal(Index index) throws IOException { + public void indexInternal(Index index) throws IOException { + // todo: add number of inserts/updates metric assert Objects.equals(index.uid().field(), IdFieldMapper.NAME) : index.uid().field(); - ensureOpen(); - final IndexResult indexResult; - indexResult = indexIntoLucene(index); - return indexResult; + + try ( + ReleasableLock releasableLock1 = readLock.acquire(); + Releasable releasableLock2 = versionMap.acquireLock(index.uid().bytes()) + ) { + ensureOpen(); + lastWriteNanos = index.startTime(); + boolean isExternalVersioning = index.versionType() == VersionType.EXTERNAL; + if (index.getAutoGeneratedIdTimestamp() == UNSET_AUTO_GENERATED_TIMESTAMP) { + validateDocumentVersion(index); + } + + if (isExternalVersioning) { + index.parsedDoc().version().setLongValue(index.version()); + } + + IndexResult indexResult = indexIntoLucene(index); + if (isExternalVersioning && indexResult.getResultType() == Result.Type.SUCCESS) { + versionMap.maybePutIndexUnderLock( + index.uid().bytes(), + new IndexVersionValue(EMPTY_TRANSLOG_LOCATION, index.version(), index.seqNo(), index.primaryTerm()) + ); + } + } catch (VersionConflictEngineException e) { + logger.debug("Version conflict encountered when processing index operation", e); + throw e; + } catch (RuntimeException | IOException e) { + try { + maybeFailEngine("index", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw e; + } } private IndexResult indexIntoLucene(Index index) throws IOException { @@ -203,18 +238,56 @@ public DeleteResult delete(Delete delete) throws IOException { /** * Processes delete operations. This is used internally by the stream poller only. 
*/ - public DeleteResult deleteInternal(Delete delete) throws IOException { + public void deleteInternal(Delete delete) throws IOException { + // todo: add number of deletes metric + versionMap.enforceSafeAccess(); assert Objects.equals(delete.uid().field(), IdFieldMapper.NAME) : delete.uid().field(); - ensureOpen(); - final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newDeleteTombstoneDoc(delete.id()); - assert tombstone.docs().size() == 1 : "Tombstone doc should have single doc [" + tombstone + "]"; - final ParseContext.Document doc = tombstone.docs().get(0); - assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Delete tombstone document but _tombstone field is not set [" - + doc - + " ]"; - doc.add(softDeletesField); - indexWriter.softUpdateDocument(delete.uid(), doc, softDeletesField); - return new DeleteResult(1, delete.primaryTerm(), -1, true); + lastWriteNanos = delete.startTime(); + + try ( + ReleasableLock releasableLock1 = readLock.acquire(); + Releasable releasableLock2 = versionMap.acquireLock(delete.uid().bytes()) + ) { + ensureOpen(); + validateDocumentVersion(delete); + final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newDeleteTombstoneDoc(delete.id()); + boolean isExternalVersioning = delete.versionType() == VersionType.EXTERNAL; + if (isExternalVersioning) { + tombstone.version().setLongValue(delete.version()); + } + + assert tombstone.docs().size() == 1 : "Tombstone doc should have single doc [" + tombstone + "]"; + final ParseContext.Document doc = tombstone.docs().get(0); + assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Delete tombstone document but _tombstone field is not set [" + + doc + + " ]"; + doc.add(softDeletesField); + + indexWriter.softUpdateDocument(delete.uid(), doc, softDeletesField); + if (isExternalVersioning) { + versionMap.putDeleteUnderLock( + delete.uid().bytes(), + new DeleteVersionValue( + delete.version(), + delete.seqNo(), + delete.primaryTerm(), + engineConfig.getThreadPool().relativeTimeInMillis() + ) + ); + } + } catch (VersionConflictEngineException e) { + logger.debug("Version conflict encountered when processing deletes", e); + throw e; + } catch (RuntimeException | IOException e) { + try { + maybeFailEngine("delete", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw e; + } + + maybePruneDeletes(); } @Override @@ -229,6 +302,15 @@ public GetResult get(Get get, BiFunction search return getFromSearcher(get, searcherFactory, SearcherScope.EXTERNAL); } + @Override + protected void pruneDeletedTombstones() { + final long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); + final long maxTimestampToPrune = timeMSec - engineConfig.getIndexSettings().getGcDeletesInMillis(); + // prune based only on timestamp and not sequence number + versionMap.pruneTombstones(maxTimestampToPrune, Long.MAX_VALUE); + lastDeleteVersionPruneTimeMSec = timeMSec; + } + @Override public Translog.Snapshot newChangesSnapshot( String source, @@ -381,6 +463,33 @@ private void updateErrorHandlingStrategy(IngestionErrorStrategy.ErrorStrategy er streamPoller.updateErrorStrategy(updatedIngestionErrorStrategy); } + /** + * Validates document version for pull-based ingestion. Only external versioning is supported. 
+ */ + private void validateDocumentVersion(final Operation operation) throws IOException { + if (operation.versionType() != VersionType.EXTERNAL) { + return; + } + + versionMap.enforceSafeAccess(); + final VersionValue versionValue = resolveDocVersion(operation, false); + final long currentVersion; + final boolean currentNotFoundOrDeleted; + + if (versionValue == null) { + // todo: possible to optimize addDoc instead of updateDoc if version is not present? + currentVersion = Versions.NOT_FOUND; + currentNotFoundOrDeleted = true; + } else { + currentVersion = versionValue.version; + currentNotFoundOrDeleted = versionValue.isDelete(); + } + + if (operation.versionType().isVersionConflictForWrites(currentVersion, operation.version(), currentNotFoundOrDeleted)) { + throw new VersionConflictEngineException(shardId, operation, currentVersion, currentNotFoundOrDeleted); + } + } + /** * Pause the poller. Used by management flows. */ diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 7e171e3f1714c..e6baed6cd16b6 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -153,7 +153,7 @@ public class InternalEngine extends Engine { /** * When we last pruned expired tombstones from versionMap.deletes: */ - private volatile long lastDeleteVersionPruneTimeMSec; + protected volatile long lastDeleteVersionPruneTimeMSec; protected final TranslogManager translogManager; protected final IndexWriter indexWriter; @@ -163,6 +163,10 @@ public class InternalEngine extends Engine { protected final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); protected final NumericDocValuesField softDeletesField = Lucene.newSoftDeletesField(); + // A uid (in the form of BytesRef) to the version map + // we use the hashed variant since we iterate over it and check removal and additions on existing keys + protected final LiveVersionMap versionMap = new LiveVersionMap(); + @Nullable protected final String historyUUID; @@ -173,10 +177,6 @@ public class InternalEngine extends Engine { private final Lock flushLock = new ReentrantLock(); private final ReentrantLock optimizeLock = new ReentrantLock(); - // A uid (in the form of BytesRef) to the version map - // we use the hashed variant since we iterate over it and check removal and additions on existing keys - private final LiveVersionMap versionMap = new LiveVersionMap(); - private volatile SegmentInfos lastCommittedSegmentInfos; private final IndexThrottle throttle; @@ -745,7 +745,7 @@ private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) } /** resolves the current version of the document, returning null if not found */ - private VersionValue resolveDocVersion(final Operation op, boolean loadSeqNo) throws IOException { + protected VersionValue resolveDocVersion(final Operation op, boolean loadSeqNo) throws IOException { assert incrementVersionLookup(); // used for asserting in tests VersionValue versionValue = getVersionFromMap(op.uid().bytes()); if (versionValue == null) { @@ -1974,7 +1974,7 @@ private void refreshLastCommittedSegmentInfos() { } } - private void pruneDeletedTombstones() { + protected void pruneDeletedTombstones() { /* * We need to deploy two different trimming strategies for GC deletes on primary and replicas. 
Delete operations on primary * are remembered for at least one GC delete cycle and trimmed periodically. This is, at the moment, the best we can do on diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java index c1d098279a7eb..b1d3004131291 100644 --- a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java +++ b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java @@ -29,11 +29,13 @@ import org.opensearch.index.VersionType; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.IngestionEngine; +import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.ParseContext; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.mapper.Uid; +import org.opensearch.index.mapper.VersionFieldMapper; import java.io.IOException; import java.util.Map; @@ -170,6 +172,15 @@ protected Engine.Operation getOperation(byte[] payload, IngestionShardPointer po String opTypeString = (String) payloadMap.getOrDefault(OP_TYPE, "index"); DocWriteRequest.OpType opType = DocWriteRequest.OpType.fromString(opTypeString); + // Check message for document version. Pull-based ingestion only supports external versioning. + // By default, writes succeed regardless of document version. + long documentVersion = Versions.MATCH_ANY; + VersionType documentVersionType = VersionType.INTERNAL; + if (payloadMap.containsKey(VersionFieldMapper.NAME)) { + documentVersion = Long.parseLong((String) payloadMap.get(VersionFieldMapper.NAME)); + documentVersionType = VersionType.EXTERNAL; + } + Engine.Operation operation; switch (opType) { case INDEX: @@ -199,8 +210,8 @@ protected Engine.Operation getOperation(byte[] payload, IngestionShardPointer po doc, 0, 1, - Versions.MATCH_ANY, - VersionType.INTERNAL, + documentVersion, + documentVersionType, Engine.Operation.Origin.PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, @@ -225,8 +236,8 @@ protected Engine.Operation getOperation(byte[] payload, IngestionShardPointer po new Term(IdFieldMapper.NAME, Uid.encodeId(id)), 0, 1, - Versions.MATCH_ANY, - VersionType.INTERNAL, + documentVersion, + documentVersionType, Engine.Operation.Origin.PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, @@ -282,6 +293,12 @@ public void run() { currentShardPointer = readResult.getPointer(); messageProcessor.process(readResult.getMessage(), readResult.getPointer()); readResult = null; + } catch (VersionConflictEngineException e) { + // Messages with version conflicts will be dropped. This should not have any impact to data + // correctness as pull-based ingestion does not support partial updates. + // TODO: add metric + logger.debug("Dropping message due to version conflict. 
ShardPointer: " + readResult.getPointer().asString(), e); + readResult = null; } catch (Exception e) { errorStrategy.handleError(e, IngestionErrorStrategy.ErrorStage.PROCESSING); if (errorStrategy.shouldIgnoreError(e, IngestionErrorStrategy.ErrorStage.PROCESSING)) { From 763d4fc829e714edb9a6a9ea1c24d954406765f2 Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Tue, 15 Apr 2025 13:47:03 +0530 Subject: [PATCH 219/550] Add UT for getSleepDuration in AbstractAsyncTask (#17945) Signed-off-by: Ashish Singh --- .../concurrent/AbstractAsyncTaskTests.java | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/AbstractAsyncTaskTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/AbstractAsyncTaskTests.java index e56a1793c06e6..4c531cddbd73b 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/AbstractAsyncTaskTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/AbstractAsyncTaskTests.java @@ -248,4 +248,26 @@ protected void runInternal() { assertFalse(task.isScheduled()); } } + + public void testGetSleepDurationForFirstRefresh() { + AbstractAsyncTask task = new AbstractAsyncTask( + logger, + threadPool, + TimeValue.timeValueMillis(randomIntBetween(1, 2)), + true, + OpenSearchTestCase::randomBoolean + ) { + @Override + protected boolean mustReschedule() { + return true; + } + + @Override + protected void runInternal() { + // no-op + } + }; + // No exceptions should be thrown + task.getSleepDuration(); + } } From c44d230bba25ce20bad96fd91e3ad241852bd01c Mon Sep 17 00:00:00 2001 From: guojialiang Date: Tue, 15 Apr 2025 16:17:16 +0800 Subject: [PATCH 220/550] [segment replication] Add cluster setting for retry timeout of publish checkpoint tx action (#17749) * TransportReplicationAction support specifying retryTimeout, PublishCheckpointAction use the never give up strategy. 
Signed-off-by: guojialiang * support PublishCheckpointAction PUBLISH_CHECK_POINT_RETRY_TIMEOUT to override the default retry timeout Signed-off-by: guojialiang * add TransportReplicationAction.getRetryTimeoutSetting Signed-off-by: guojialiang * add entry to CHANGELOG.md Signed-off-by: guojialiang * rewrite the PR title Signed-off-by: guojialiang * modify changelog entry Signed-off-by: guojialiang * add comments Signed-off-by: guojialiang * update Signed-off-by: guojialiang --------- Signed-off-by: guojialiang --- CHANGELOG.md | 1 + .../replication/SegmentReplicationIT.java | 55 +++++++++++++++++++ .../TransportReplicationAction.java | 8 ++- .../common/settings/ClusterSettings.java | 2 + .../checkpoint/PublishCheckpointAction.java | 17 ++++++ 5 files changed, 81 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 60371158d4933..ec1c7b626dc5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Increase the default segment counter step size when replica promoting ([#17568](https://github.com/opensearch-project/OpenSearch/pull/17568)) - [WLM] Rename QueryGroup to WorkloadGroup ([#17901](https://github.com/opensearch-project/OpenSearch/pull/17901)) - Relaxes jarHell check for optionally extended plugins([#17893](https://github.com/opensearch-project/OpenSearch/pull/17893))) +- Add cluster setting for retry timeout of publish checkpoint tx action ([#17749](https://github.com/opensearch-project/OpenSearch/pull/17749)) ### Dependencies - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.2 ([#17607](https://github.com/opensearch-project/OpenSearch/pull/17607), [#17669](https://github.com/opensearch-project/OpenSearch/pull/17669)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 608ae2e215b31..ae093620c25b4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -41,6 +41,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.action.termvectors.TermVectorsRequestBuilder; import org.opensearch.action.termvectors.TermVectorsResponse; import org.opensearch.action.update.UpdateResponse; @@ -59,6 +60,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; @@ -72,6 +74,7 @@ import org.opensearch.index.engine.NRTReplicationReaderManager; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.replication.checkpoint.PublishCheckpointAction; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.NodeClosedException; import org.opensearch.search.SearchService; @@ -83,6 +86,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import 
org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportService; import org.opensearch.transport.client.Requests; import org.junit.Before; @@ -98,6 +102,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -131,6 +136,56 @@ private static String indexOrAlias() { return randomBoolean() ? INDEX_NAME : "alias"; } + public void testRetryPublishCheckPoint() throws Exception { + // Reproduce the case where the replica shard cannot synchronize data from the primary shard when there is a network exception. + // Test update of configuration PublishCheckpointAction#PUBLISH_CHECK_POINT_RETRY_TIMEOUT. + Settings mockNodeSetting = Settings.builder() + .put(TransportReplicationAction.REPLICATION_RETRY_TIMEOUT.getKey(), TimeValue.timeValueSeconds(0)) + .put(PublishCheckpointAction.PUBLISH_CHECK_POINT_RETRY_TIMEOUT.getKey(), TimeValue.timeValueSeconds(0)) + .build(); + + final String primaryNode = internalCluster().startDataOnlyNode(mockNodeSetting); + createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put("index.refresh_interval", -1).build()); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(mockNodeSetting); + ensureGreen(INDEX_NAME); + + // update publish checkpoint retry time out + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().put(PublishCheckpointAction.PUBLISH_CHECK_POINT_RETRY_TIMEOUT.getKey(), TimeValue.timeValueMinutes(10)) + ) + .get(); + + // mock network exception + MockTransportService replicaTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + replicaNode + )); + AtomicBoolean mockReplicaReceivePublishCheckpointException = new AtomicBoolean(true); + replicaTransportService.addRequestHandlingBehavior( + PublishCheckpointAction.ACTION_NAME + TransportReplicationAction.REPLICA_ACTION_SUFFIX, + (handler, request, channel, task) -> { + if (mockReplicaReceivePublishCheckpointException.get()) { + logger.info("mock remote transport exception"); + throw new RemoteTransportException("mock remote transport exception", new OpenSearchRejectedExecutionException()); + } + logger.info("replica receive publish checkpoint request"); + handler.messageReceived(request, channel, task); + } + ); + + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + waitForSearchableDocs(0, replicaNode); + logger.info("ensure publish checkpoint request can be process"); + mockReplicaReceivePublishCheckpointException.set(false); + + waitForSearchableDocs(1, primaryNode, replicaNode); + replicaTransportService.clearAllRules(); + } + public void testPrimaryStopped_ReplicaPromoted() throws Exception { final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 29cf4003ac679..c81754b33fa62 100644 --- 
a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -249,7 +249,7 @@ protected TransportReplicationAction( this.transportReplicaAction = actionName + REPLICA_ACTION_SUFFIX; this.initialRetryBackoffBound = REPLICATION_INITIAL_RETRY_BACKOFF_BOUND.get(settings); - this.retryTimeout = REPLICATION_RETRY_TIMEOUT.get(settings); + this.retryTimeout = getRetryTimeoutSetting().get(settings); this.forceExecutionOnPrimary = forceExecutionOnPrimary; transportService.registerRequestHandler(actionName, ThreadPool.Names.SAME, requestReader, this::handleOperationRequest); @@ -273,7 +273,11 @@ protected TransportReplicationAction( ClusterSettings clusterSettings = clusterService.getClusterSettings(); clusterSettings.addSettingsUpdateConsumer(REPLICATION_INITIAL_RETRY_BACKOFF_BOUND, (v) -> initialRetryBackoffBound = v); - clusterSettings.addSettingsUpdateConsumer(REPLICATION_RETRY_TIMEOUT, (v) -> retryTimeout = v); + clusterSettings.addSettingsUpdateConsumer(getRetryTimeoutSetting(), (v) -> retryTimeout = v); + } + + protected Setting getRetryTimeoutSetting() { + return REPLICATION_RETRY_TIMEOUT; } /** diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index d0722b4f3a942..137260c82e6f5 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -132,6 +132,7 @@ import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.checkpoint.PublishCheckpointAction; import org.opensearch.indices.store.IndicesStore; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.fs.FsHealthService; @@ -440,6 +441,7 @@ public void apply(Settings value, Settings current, Settings previous) { HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, TransportReplicationAction.REPLICATION_INITIAL_RETRY_BACKOFF_BOUND, TransportReplicationAction.REPLICATION_RETRY_TIMEOUT, + PublishCheckpointAction.PUBLISH_CHECK_POINT_RETRY_TIMEOUT, TransportSettings.HOST, TransportSettings.PUBLISH_HOST, TransportSettings.PUBLISH_HOST_PROFILE, diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index d1e2884956f5c..7181355333be7 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -22,7 +22,9 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; @@ -60,6 +62,16 @@ public class PublishCheckpointAction extends TransportReplicationAction< private final 
SegmentReplicationTargetService replicationService; + /** + * The timeout for retrying publish checkpoint requests. + */ + public static final Setting PUBLISH_CHECK_POINT_RETRY_TIMEOUT = Setting.timeSetting( + "indices.publish_check_point.retry_timeout", + TimeValue.timeValueMinutes(5), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + @Inject public PublishCheckpointAction( Settings settings, @@ -87,6 +99,11 @@ public PublishCheckpointAction( this.replicationService = targetService; } + @Override + protected Setting getRetryTimeoutSetting() { + return PUBLISH_CHECK_POINT_RETRY_TIMEOUT; + } + @Override protected ReplicationResponse newResponseInstance(StreamInput in) throws IOException { return new ReplicationResponse(in); From 80cb033240c120e8f540c03333df60c060a059d4 Mon Sep 17 00:00:00 2001 From: guojialiang Date: Tue, 15 Apr 2025 16:20:47 +0800 Subject: [PATCH 221/550] Add MergedSegmentWarmerFactory to support the extension of IndexReaderWarmer (#17881) * support merged segment warmer Signed-off-by: guojialiang * Introduce MergedSegmentWarmerFactory for Local/Remote merged segment pre copy Signed-off-by: guojialiang * add FeatureFlags.MERGED_SEGMENT_WARMER_EXPERIMENTAL_FLAG Signed-off-by: guojialiang * add an entry in CHANGELOG Signed-off-by: guojialiang * add validation Signed-off-by: guojialiang * modify changelog entry Signed-off-by: guojialiang * add comment Signed-off-by: guojialiang * add comment Signed-off-by: guojialiang --------- Signed-off-by: guojialiang --- CHANGELOG.md | 1 + .../opensearch/index/shard/IndexShardIT.java | 4 +- .../common/settings/FeatureFlagSettings.java | 3 +- .../opensearch/common/util/FeatureFlags.java | 13 ++++ .../org/opensearch/index/IndexService.java | 7 +- .../org/opensearch/index/IndexSettings.java | 4 + .../opensearch/index/engine/EngineConfig.java | 18 +++++ .../index/engine/EngineConfigFactory.java | 5 +- .../index/engine/InternalEngine.java | 6 ++ .../engine/LocalMergedSegmentWarmer.java | 63 ++++++++++++++++ .../engine/MergedSegmentWarmerFactory.java | 73 +++++++++++++++++++ .../RemoteStoreMergedSegmentWarmer.java | 67 +++++++++++++++++ .../opensearch/index/shard/IndexShard.java | 9 ++- .../opensearch/indices/IndicesService.java | 7 +- .../cluster/IndicesClusterStateService.java | 19 +++-- .../main/java/org/opensearch/node/Node.java | 5 ++ .../engine/EngineConfigFactoryTests.java | 2 + ...dicesLifecycleListenerSingleNodeTests.java | 4 +- .../indices/IndicesRequestCacheTests.java | 4 +- ...actIndicesClusterStateServiceTestCase.java | 4 +- ...ClusterStateServiceRandomUpdatesTests.java | 1 + .../snapshots/SnapshotResiliencyTests.java | 4 +- .../index/shard/IndexShardTestCase.java | 4 +- 23 files changed, 308 insertions(+), 19 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/engine/LocalMergedSegmentWarmer.java create mode 100644 server/src/main/java/org/opensearch/index/engine/MergedSegmentWarmerFactory.java create mode 100644 server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java diff --git a/CHANGELOG.md b/CHANGELOG.md index ec1c7b626dc5f..9f00995092684 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Support sub agg in filter rewrite optimization ([#17447](https://github.com/opensearch-project/OpenSearch/pull/17447) - Disable scoring of keyword term search by default, fallback logic with new use_similarity:true parameter 
([#17889](https://github.com/opensearch-project/OpenSearch/pull/17889)) - Add versioning support in pull-based ingestion ([#17918](https://github.com/opensearch-project/OpenSearch/pull/17918)) +- Introducing MergedSegmentWarmerFactory to support the extension of IndexWriter.IndexReaderWarmer ([#17881](https://github.com/opensearch-project/OpenSearch/pull/17881)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 2d0918ff6e89a..37e6d23f22fcb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -74,6 +74,7 @@ import org.opensearch.index.VersionType; import org.opensearch.index.engine.CommitStats; import org.opensearch.index.engine.Engine; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.engine.NoOpEngine; import org.opensearch.index.flush.FlushStats; import org.opensearch.index.mapper.MapperService; @@ -719,7 +720,8 @@ public static final IndexShard newIndexShard( DefaultRemoteStoreSettings.INSTANCE, false, IndexShardTestUtils.getFakeDiscoveryNodes(initializingShardRouting), - mock(Function.class) + mock(Function.class), + new MergedSegmentWarmerFactory(null, null, null) ); } diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 962e51b53f5fd..07c026cfaa6e3 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -38,6 +38,7 @@ protected FeatureFlagSettings( FeatureFlags.STAR_TREE_INDEX_SETTING, FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, FeatureFlags.TERM_VERSION_PRECOMMIT_ENABLE_SETTING, - FeatureFlags.ARROW_STREAMS_SETTING + FeatureFlags.ARROW_STREAMS_SETTING, + FeatureFlags.MERGED_SEGMENT_WARMER_EXPERIMENTAL_SETTING ); } diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 1a1a28dc9def8..5633fe91d51a9 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -75,6 +75,12 @@ public class FeatureFlags { */ public static final String BACKGROUND_TASK_EXECUTION_EXPERIMENTAL = FEATURE_FLAG_PREFIX + "task.background.enabled"; + /** + * Gates the functionality of merged segment warmer in local/remote segment replication. + * Once the feature is ready for release, this feature flag can be removed. + */ + public static final String MERGED_SEGMENT_WARMER_EXPERIMENTAL_FLAG = "opensearch.experimental.feature.merged_segment_warmer.enabled"; + public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( REMOTE_STORE_MIGRATION_EXPERIMENTAL, false, @@ -97,6 +103,12 @@ public class FeatureFlags { Property.NodeScope ); + public static final Setting MERGED_SEGMENT_WARMER_EXPERIMENTAL_SETTING = Setting.boolSetting( + MERGED_SEGMENT_WARMER_EXPERIMENTAL_FLAG, + false, + Property.NodeScope + ); + /** * Gates the functionality of star tree index, which improves the performance of search * aggregations. 
@@ -153,6 +165,7 @@ static class FeatureFlagsImpl { SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_SETTING, SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_SETTING.getDefault(Settings.EMPTY) ); + put(MERGED_SEGMENT_WARMER_EXPERIMENTAL_SETTING, MERGED_SEGMENT_WARMER_EXPERIMENTAL_SETTING.getDefault(Settings.EMPTY)); } }; diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index b455ad18c7227..6d4a5ff995bd5 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -76,6 +76,7 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineFactory; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.fielddata.IndexFieldDataService; import org.opensearch.index.mapper.MapperService; @@ -574,7 +575,8 @@ public synchronized IndexShard createShard( final RepositoriesService repositoriesService, final DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode, - DiscoveryNodes discoveryNodes + DiscoveryNodes discoveryNodes, + MergedSegmentWarmerFactory mergedSegmentWarmerFactory ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); /* @@ -701,7 +703,8 @@ protected void closeInternal() { remoteStoreSettings, seedRemote, discoveryNodes, - segmentReplicationStatsProvider + segmentReplicationStatsProvider, + mergedSegmentWarmerFactory ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 38604ffd8bf8f..ae35a2593cc56 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -1340,6 +1340,10 @@ public boolean isSegRepLocalEnabled() { return ReplicationType.SEGMENT.equals(replicationType) && !isRemoteStoreEnabled(); } + public boolean isDocumentReplication() { + return ReplicationType.DOCUMENT.equals(replicationType); + } + /** * Returns if remote store is enabled for this index. 
*/ diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index 6540e69e7dfcd..f2facbd0dffa9 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -33,6 +33,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.codecs.Codec; +import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.search.QueryCache; @@ -248,6 +249,8 @@ private static void doValidateCodecSettings(final String codec) { private final TranslogFactory translogFactory; + private final IndexWriter.IndexReaderWarmer indexReaderWarmer; + /** * Creates a new {@link org.opensearch.index.engine.EngineConfig} */ @@ -299,6 +302,7 @@ private EngineConfig(Builder builder) { this.translogFactory = builder.translogFactory; this.leafSorter = builder.leafSorter; this.documentMapperForTypeSupplier = builder.documentMapperForTypeSupplier; + this.indexReaderWarmer = builder.indexReaderWarmer; } /** @@ -523,6 +527,14 @@ public TranslogFactory getTranslogFactory() { return translogFactory; } + /** + * Returns the underlying indexReaderWarmer + * @return the indexReaderWarmer + */ + public IndexWriter.IndexReaderWarmer getIndexReaderWarmer() { + return indexReaderWarmer; + } + /** * A supplier supplies tombstone documents which will be used in soft-update methods. * The returned document consists only _uid, _seqno, _term and _version fields; other metadata fields are excluded. @@ -598,6 +610,7 @@ public static class Builder { private TranslogFactory translogFactory = new InternalTranslogFactory(); private Supplier documentMapperForTypeSupplier; Comparator leafSorter; + private IndexWriter.IndexReaderWarmer indexReaderWarmer; public Builder shardId(ShardId shardId) { this.shardId = shardId; @@ -739,6 +752,11 @@ public Builder leafSorter(Comparator leafSorter) { return this; } + public Builder indexReaderWarmer(IndexWriter.IndexReaderWarmer indexReaderWarmer) { + this.indexReaderWarmer = indexReaderWarmer; + return this; + } + public EngineConfig build() { return new EngineConfig(this); } diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java index d892d6e95346c..81fa90dfdd78f 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.search.QueryCache; @@ -156,7 +157,8 @@ public EngineConfig newEngineConfig( BooleanSupplier startedPrimarySupplier, TranslogFactory translogFactory, Comparator leafSorter, - Supplier documentMapperForTypeSupplier + Supplier documentMapperForTypeSupplier, + IndexWriter.IndexReaderWarmer indexReaderWarmer ) { CodecService codecServiceToUse = codecService; if (codecService == null && this.codecServiceFactory != null) { @@ -191,6 +193,7 @@ public EngineConfig newEngineConfig( .translogFactory(translogFactory) .leafSorter(leafSorter) .documentMapperForTypeSupplier(documentMapperForTypeSupplier) + .indexReaderWarmer(indexReaderWarmer) .build(); } 
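The indexReaderWarmer threaded through EngineConfig and its builder above ends up on Lucene's IndexWriterConfig (see the InternalEngine change below), which invokes the warmer on every freshly merged segment before that segment becomes visible to searches; in this patch the wiring is gated behind the opensearch.experimental.feature.merged_segment_warmer.enabled flag and only applies to segment-replication or remote-store indices. A minimal standalone sketch of the underlying Lucene hook, not part of this patch and using hypothetical class and variable names:

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.LeafReader;

    public final class MergedSegmentWarmerSketch {
        // Attach a trivial warmer to an IndexWriterConfig. Lucene calls warm(reader)
        // once per newly merged segment before it is published to searchers, which is
        // the point where a Local/RemoteStoreMergedSegmentWarmer could pre-copy the
        // segment to replicas.
        public static IndexWriterConfig attachWarmer(IndexWriterConfig iwc) {
            IndexWriter.IndexReaderWarmer warmer = (LeafReader mergedSegment) -> {
                // Touch the merged segment; a real warmer would start the replica copy here.
                int maxDoc = mergedSegment.maxDoc();
                assert maxDoc >= 0;
            };
            iwc.setMergedSegmentWarmer(warmer);
            return iwc;
        }
    }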
diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index e6baed6cd16b6..62bfd27516964 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -81,6 +81,7 @@ import org.opensearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.KeyedLock; import org.opensearch.common.util.concurrent.ReleasableLock; @@ -2382,6 +2383,11 @@ private IndexWriterConfig getIndexWriterConfig() { if (config().getLeafSorter() != null) { iwc.setLeafSorter(config().getLeafSorter()); // The default segment search order } + if (FeatureFlags.isEnabled(FeatureFlags.MERGED_SEGMENT_WARMER_EXPERIMENTAL_SETTING) + && config().getIndexSettings().isSegRepEnabledOrRemoteNode()) { + assert null != config().getIndexReaderWarmer(); + iwc.setMergedSegmentWarmer(config().getIndexReaderWarmer()); + } return iwc; } diff --git a/server/src/main/java/org/opensearch/index/engine/LocalMergedSegmentWarmer.java b/server/src/main/java/org/opensearch/index/engine/LocalMergedSegmentWarmer.java new file mode 100644 index 0000000000000..4eabd9399eadf --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/LocalMergedSegmentWarmer.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.index.engine; + +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.LeafReader; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Implementation of a {@link IndexWriter.IndexReaderWarmer} when local on-disk segment replication is enabled. 
+ * + * @opensearch.internal + */ +public class LocalMergedSegmentWarmer implements IndexWriter.IndexReaderWarmer { + private final TransportService transportService; + private final RecoverySettings recoverySettings; + private final ClusterService clusterService; + + public LocalMergedSegmentWarmer(TransportService transportService, RecoverySettings recoverySettings, ClusterService clusterService) { + this.transportService = transportService; + this.recoverySettings = recoverySettings; + this.clusterService = clusterService; + } + + @Override + public void warm(LeafReader leafReader) throws IOException { + // TODO: node-node segment replication merged segment warmer + } +} diff --git a/server/src/main/java/org/opensearch/index/engine/MergedSegmentWarmerFactory.java b/server/src/main/java/org/opensearch/index/engine/MergedSegmentWarmerFactory.java new file mode 100644 index 0000000000000..fd6a9851167fc --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/MergedSegmentWarmerFactory.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */
+
+package org.opensearch.index.engine;
+
+import org.apache.lucene.index.IndexWriter;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.indices.recovery.RecoverySettings;
+import org.opensearch.transport.TransportService;
+
+/**
+ * MergedSegmentWarmerFactory to enable creation of various local on-disk
+ * and remote store flavors of {@link IndexWriter.IndexReaderWarmer}
+ *
+ * @opensearch.internal
+ */
+@ExperimentalApi
+public class MergedSegmentWarmerFactory {
+    private final TransportService transportService;
+    private final RecoverySettings recoverySettings;
+    private final ClusterService clusterService;
+
+    public MergedSegmentWarmerFactory(TransportService transportService, RecoverySettings recoverySettings, ClusterService clusterService) {
+        this.transportService = transportService;
+        this.recoverySettings = recoverySettings;
+        this.clusterService = clusterService;
+    }
+
+    public IndexWriter.IndexReaderWarmer get(IndexShard shard) {
+        if (shard.indexSettings().isAssignedOnRemoteNode()) {
+            return new RemoteStoreMergedSegmentWarmer(transportService, recoverySettings, clusterService);
+        } else if (shard.indexSettings().isSegRepLocalEnabled()) {
+            return new LocalMergedSegmentWarmer(transportService, recoverySettings, clusterService);
+        } else if (shard.indexSettings().isDocumentReplication()) {
+            // MergedSegmentWarmerFactory#get is called when the IndexShard is initialized. For document replication,
+            // the IndexWriter.IndexReaderWarmer should be null.
+            return null;
+        }
+        // Handle only the known replication modes and throw an exception otherwise, so the IndexReaderWarmer behaviour stays predictable.
+        throw new IllegalStateException(shard.shardId() + " can't determine IndexReaderWarmer");
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java b/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java
new file mode 100644
index 0000000000000..f3d5f8a4cc93b
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */ + +package org.opensearch.index.engine; + +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.LeafReader; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Implementation of a {@link IndexWriter.IndexReaderWarmer} when remote store is enabled. + * + * @opensearch.internal + */ +public class RemoteStoreMergedSegmentWarmer implements IndexWriter.IndexReaderWarmer { + private final TransportService transportService; + private final RecoverySettings recoverySettings; + private final ClusterService clusterService; + + public RemoteStoreMergedSegmentWarmer( + TransportService transportService, + RecoverySettings recoverySettings, + ClusterService clusterService + ) { + this.transportService = transportService; + this.recoverySettings = recoverySettings; + this.clusterService = clusterService; + } + + @Override + public void warm(LeafReader leafReader) throws IOException { + // TODO: remote store merged segment warmer + } +} diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index b0a5212b1d330..e38f6408b9b17 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -129,6 +129,7 @@ import org.opensearch.index.engine.EngineException; import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.IngestionEngine; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.engine.NRTReplicationEngine; import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.engine.RefreshFailedEngineException; @@ -367,6 +368,7 @@ Runnable getGlobalCheckpointSyncer() { private final ShardMigrationState shardMigrationState; private DiscoveryNodes discoveryNodes; private final Function segmentReplicationStatsProvider; + private final MergedSegmentWarmerFactory mergedSegmentWarmerFactory; public IndexShard( final ShardRouting shardRouting, @@ -398,7 +400,8 @@ public IndexShard( final RemoteStoreSettings remoteStoreSettings, boolean seedRemote, final DiscoveryNodes discoveryNodes, - final Function segmentReplicationStatsProvider + final Function segmentReplicationStatsProvider, + final MergedSegmentWarmerFactory mergedSegmentWarmerFactory ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -501,6 +504,7 @@ public boolean shouldCache(Query query) { this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote); this.discoveryNodes = discoveryNodes; this.segmentReplicationStatsProvider = segmentReplicationStatsProvider; + this.mergedSegmentWarmerFactory = mergedSegmentWarmerFactory; } /** @@ -4096,7 +4100,8 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro translogFactorySupplier.apply(indexSettings, shardRouting), isTimeSeriesDescSortOptimizationEnabled() ? 
DataStream.TIMESERIES_LEAF_SORTER : null, // DESC @timestamp default order for // timeseries - () -> docMapper() + () -> docMapper(), + mergedSegmentWarmerFactory.get(this) ); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index ec60fa61d0c99..a24db145e5421 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -114,6 +114,7 @@ import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.engine.NoOpEngine; import org.opensearch.index.engine.ReadOnlyEngine; @@ -1166,7 +1167,8 @@ public IndexShard createShard( final DiscoveryNode targetNode, final DiscoveryNode sourceNode, final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, - final DiscoveryNodes discoveryNodes + final DiscoveryNodes discoveryNodes, + final MergedSegmentWarmerFactory mergedSegmentWarmerFactory ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); ensureChangesAllowed(); @@ -1182,7 +1184,8 @@ public IndexShard createShard( repositoriesService, targetNode, sourceNode, - discoveryNodes + discoveryNodes, + mergedSegmentWarmerFactory ); indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index d35ae46442fa3..f7ac53db126b5 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -66,6 +66,7 @@ import org.opensearch.index.IndexComponent; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.ReplicationTracker; @@ -151,6 +152,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; + private final MergedSegmentWarmerFactory mergedSegmentWarmerFactory; + @Inject public IndicesClusterStateService( final Settings settings, @@ -170,7 +173,8 @@ public IndicesClusterStateService( final GlobalCheckpointSyncAction globalCheckpointSyncAction, final RetentionLeaseSyncer retentionLeaseSyncer, final SegmentReplicationCheckpointPublisher checkpointPublisher, - final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + final MergedSegmentWarmerFactory mergedSegmentWarmerFactory ) { this( settings, @@ -190,7 +194,8 @@ public IndicesClusterStateService( primaryReplicaSyncer, globalCheckpointSyncAction::updateGlobalCheckpointForShard, retentionLeaseSyncer, - remoteStoreStatsTrackerFactory + remoteStoreStatsTrackerFactory, + mergedSegmentWarmerFactory ); } @@ -213,7 +218,8 @@ public IndicesClusterStateService( final 
PrimaryReplicaSyncer primaryReplicaSyncer, final Consumer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, - final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + final MergedSegmentWarmerFactory mergedSegmentWarmerFactory ) { this.settings = settings; this.checkpointPublisher = checkpointPublisher; @@ -238,6 +244,7 @@ public IndicesClusterStateService( this.retentionLeaseSyncer = Objects.requireNonNull(retentionLeaseSyncer); this.sendRefreshMapping = settings.getAsBoolean("indices.cluster.send_refresh_mapping", true); this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; + this.mergedSegmentWarmerFactory = mergedSegmentWarmerFactory; } @Override @@ -681,7 +688,8 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR nodes.getLocalNode(), sourceNode, remoteStoreStatsTrackerFactory, - nodes + nodes, + mergedSegmentWarmerFactory ); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed to create shard", e, state); @@ -1062,7 +1070,8 @@ T createShard( DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode, RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, - DiscoveryNodes discoveryNodes + DiscoveryNodes discoveryNodes, + MergedSegmentWarmerFactory mergedSegmentWarmerFactory ) throws IOException; /** diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 569216073cf2f..88513e0bdae12 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -154,6 +154,7 @@ import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.compositeindex.CompositeIndexSettings; import org.opensearch.index.engine.EngineFactory; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.index.remote.RemoteIndexPathUploader; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; @@ -452,6 +453,7 @@ public static class DiscoverySettings { private final AtomicReference runnableTaskListener; private FileCache fileCache; private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; + private final MergedSegmentWarmerFactory mergedSegmentWarmerFactory; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -1454,6 +1456,8 @@ protected Node( resourcesToClose.add(persistentTasksClusterService); final PersistentTasksService persistentTasksService = new PersistentTasksService(clusterService, threadPool, client); + mergedSegmentWarmerFactory = new MergedSegmentWarmerFactory(transportService, recoverySettings, clusterService); + modules.add(b -> { b.bind(Node.class).toInstance(this); b.bind(NodeService.class).toInstance(nodeService); @@ -1555,6 +1559,7 @@ protected Node( b.bind(SegmentReplicationStatsTracker.class).toInstance(segmentReplicationStatsTracker); b.bind(SearchRequestOperationsCompositeListenerFactory.class).toInstance(searchRequestOperationsCompositeListenerFactory); b.bind(SegmentReplicator.class).toInstance(segmentReplicator); + b.bind(MergedSegmentWarmerFactory.class).toInstance(mergedSegmentWarmerFactory); taskManagerClientOptional.ifPresent(value -> b.bind(TaskManagerClient.class).toInstance(value)); }); diff --git a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java 
b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java index f8ed68eb2e0a3..acb55c33a3265 100644 --- a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java @@ -71,6 +71,7 @@ public void testCreateEngineConfigFromFactory() { () -> Boolean.TRUE, new InternalTranslogFactory(), null, + null, null ); @@ -152,6 +153,7 @@ public void testCreateCodecServiceFromFactory() { () -> Boolean.TRUE, new InternalTranslogFactory(), null, + null, null ); assertNotNull(config.getCodec()); diff --git a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 0428bdf0655b0..0d41230f7a20b 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -44,6 +44,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; @@ -166,7 +167,8 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem null, localNode, null, - DiscoveryNodes.builder().add(localNode).build() + DiscoveryNodes.builder().add(localNode).build(), + new MergedSegmentWarmerFactory(null, null, null) ); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java index a60e9b381a8d3..0ba97f1d5db09 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java @@ -81,6 +81,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.cache.request.ShardRequestCache; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.IndexShard; @@ -1420,7 +1421,8 @@ public void testDeleteAndCreateIndexShardOnSameNodeAndVerifyStats() throws Excep null, localNode, null, - DiscoveryNodes.builder().add(localNode).build() + DiscoveryNodes.builder().add(localNode).build(), + new MergedSegmentWarmerFactory(null, null, null) ); // Verify that the new shard requestStats entries are empty. 
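The tests above and below construct the factory as new MergedSegmentWarmerFactory(null, null, null) because its collaborators are only needed once a warmer is resolved for a concrete shard; in production the factory is created and bound in Node.java as shown earlier. A rough usage sketch, with indexShard, transportService, recoverySettings, and clusterService assumed to exist in the surrounding context:

    MergedSegmentWarmerFactory factory = new MergedSegmentWarmerFactory(transportService, recoverySettings, clusterService);
    // The result depends on the shard's replication mode: remote-store shards get a
    // RemoteStoreMergedSegmentWarmer, local segment-replication shards get a
    // LocalMergedSegmentWarmer, and document-replication shards get null.
    IndexWriter.IndexReaderWarmer warmer = factory.get(indexShard);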
diff --git a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 0490228a5cc16..9cd8bee123dca 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -47,6 +47,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.IndexEventListener; @@ -266,7 +267,8 @@ public MockIndexShard createShard( final DiscoveryNode targetNode, final DiscoveryNode sourceNode, final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, - final DiscoveryNodes discoveryNodes + final DiscoveryNodes discoveryNodes, + final MergedSegmentWarmerFactory mergedSegmentWarmerFactory ) throws IOException { failRandomly(); RecoveryState recoveryState = new RecoveryState(shardRouting, targetNode, sourceNode); diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 22bf337b05598..7cc59aaaa9347 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -584,6 +584,7 @@ private IndicesClusterStateService createIndicesClusterStateService( primaryReplicaSyncer, s -> {}, RetentionLeaseSyncer.EMPTY, + null, null ); } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 580b507292ea8..d987871eaca7d 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -187,6 +187,7 @@ import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.analysis.AnalysisRegistry; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.remote.RemoteStorePressureService; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.seqno.GlobalCheckpointSyncAction; @@ -2171,7 +2172,8 @@ public void onFailure(final Exception e) { ), RetentionLeaseSyncer.EMPTY, SegmentReplicationCheckpointPublisher.EMPTY, - mock(RemoteStoreStatsTrackerFactory.class) + mock(RemoteStoreStatsTrackerFactory.class), + new MergedSegmentWarmerFactory(null, null, null) ); final SystemIndices systemIndices = new SystemIndices(emptyMap()); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index bdd4b40e398d5..75004f0b404d7 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -96,6 +96,7 @@ import 
org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.EngineTestCase; import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.SourceToParse; @@ -722,7 +723,8 @@ protected IndexShard newShard( DefaultRemoteStoreSettings.INSTANCE, false, discoveryNodes, - mockReplicationStatsProvider + mockReplicationStatsProvider, + new MergedSegmentWarmerFactory(null, null, null) ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { From f1a8d0efaca97fef73a4f88b197e4dced411aaca Mon Sep 17 00:00:00 2001 From: guojialiang Date: Wed, 16 Apr 2025 01:58:00 +0800 Subject: [PATCH 222/550] fix BoolQueryBuilderTests.testToQuery (#17944) Signed-off-by: guojialiang --- .../org/opensearch/index/query/BoolQueryBuilderTests.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java index 9223d82339252..94f7bb3898903 100644 --- a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java @@ -95,7 +95,8 @@ protected BoolQueryBuilder doCreateTestQueryBuilder() { @Override protected void doAssertLuceneQuery(BoolQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { if (!queryBuilder.hasClauses()) { - assertThat(query, instanceOf(MatchAllDocsQuery.class)); + assertThat(query, instanceOf(ApproximateScoreQuery.class)); + assertThat(((ApproximateScoreQuery) query).getOriginalQuery(), instanceOf(MatchAllDocsQuery.class)); } else { List clauses = new ArrayList<>(); clauses.addAll(getBooleanClauses(queryBuilder.must(), BooleanClause.Occur.MUST, context)); @@ -104,7 +105,8 @@ protected void doAssertLuceneQuery(BoolQueryBuilder queryBuilder, Query query, Q clauses.addAll(getBooleanClauses(queryBuilder.filter(), BooleanClause.Occur.FILTER, context)); if (clauses.isEmpty()) { - assertThat(query, instanceOf(MatchAllDocsQuery.class)); + assertThat(query, instanceOf(ApproximateScoreQuery.class)); + assertThat(((ApproximateScoreQuery) query).getOriginalQuery(), instanceOf(MatchAllDocsQuery.class)); } else if (query instanceof MatchNoDocsQuery == false) { assertThat(query, instanceOf(BooleanQuery.class)); BooleanQuery booleanQuery = (BooleanQuery) query; From aa0e724eb072fc7371daf577d7f2cc22775412ae Mon Sep 17 00:00:00 2001 From: Bo Zhang Date: Tue, 15 Apr 2025 13:51:39 -0700 Subject: [PATCH 223/550] Introduce mapping transformer (#17635) Signed-off-by: Bo Zhang --- CHANGELOG.md | 1 + .../create/TransportCreateIndexAction.java | 50 ++++--- .../put/TransportPutMappingAction.java | 15 +- .../TransportPutComponentTemplateAction.java | 49 +++++-- ...sportPutComposableIndexTemplateAction.java | 48 +++++-- .../put/TransportPutIndexTemplateAction.java | 51 ++++--- .../MetadataIndexTemplateService.java | 4 + .../opensearch/cluster/metadata/Template.java | 6 +- .../index/mapper/MappingTransformer.java | 37 +++++ .../mapper/MappingTransformerRegistry.java | 134 ++++++++++++++++++ .../main/java/org/opensearch/node/Node.java | 7 +- .../org/opensearch/plugins/MapperPlugin.java | 10 ++ .../TransportCreateIndexActionTests.java | 107 ++++++++++++++ 
.../put/TransportPutMappingActionTests.java | 110 ++++++++++++++ ...nsportPutComponentTemplateActionTests.java | 111 +++++++++++++++ ...PutComposableIndexTemplateActionTests.java | 110 ++++++++++++++ .../TransportPutIndexTemplateActionTests.java | 105 ++++++++++++++ .../MappingTransformerRegistryTests.java | 116 +++++++++++++++ .../indices/cluster/ClusterStateChanges.java | 20 ++- .../snapshots/SnapshotResiliencyTests.java | 12 +- 20 files changed, 1034 insertions(+), 69 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/MappingTransformer.java create mode 100644 server/src/main/java/org/opensearch/index/mapper/MappingTransformerRegistry.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/create/TransportCreateIndexActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateActionTests.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/MappingTransformerRegistryTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f00995092684..1922d74339770 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Disable scoring of keyword term search by default, fallback logic with new use_similarity:true parameter ([#17889](https://github.com/opensearch-project/OpenSearch/pull/17889)) - Add versioning support in pull-based ingestion ([#17918](https://github.com/opensearch-project/OpenSearch/pull/17918)) - Introducing MergedSegmentWarmerFactory to support the extension of IndexWriter.IndexReaderWarmer ([#17881](https://github.com/opensearch-project/OpenSearch/pull/17881)) +- Introduce mapping transformer to allow transform mappings during index create/update or index template create/update ([#17635](https://github.com/opensearch-project/OpenSearch/pull/17635)) ### Changed - Migrate BC libs to their FIPS counterparts ([#14912](https://github.com/opensearch-project/OpenSearch/pull/14912)) diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java index 38dc1a418ec8b..c1f7c6ef87e8a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -43,6 +43,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.index.mapper.MappingTransformerRegistry; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -56,6 +57,7 @@ public class TransportCreateIndexAction extends TransportClusterManagerNodeAction { private final MetadataCreateIndexService createIndexService; + private final MappingTransformerRegistry mappingTransformerRegistry; @Inject public TransportCreateIndexAction( @@ -64,7 +66,8 @@ public 
TransportCreateIndexAction( ThreadPool threadPool, MetadataCreateIndexService createIndexService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + MappingTransformerRegistry mappingTransformerRegistry ) { super( CreateIndexAction.NAME, @@ -76,6 +79,7 @@ public TransportCreateIndexAction( indexNameExpressionResolver ); this.createIndexService = createIndexService; + this.mappingTransformerRegistry = mappingTransformerRegistry; } @Override @@ -112,25 +116,31 @@ protected void clusterManagerOperation( } final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index()); - final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest( - cause, - indexName, - request.index() - ).ackTimeout(request.timeout()) - .clusterManagerNodeTimeout(request.clusterManagerNodeTimeout()) - .settings(request.settings()) - .mappings(request.mappings()) - .aliases(request.aliases()) - .context(request.context()) - .waitForActiveShards(request.waitForActiveShards()); - - createIndexService.createIndex( - updateRequest, - ActionListener.map( - listener, - response -> new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcknowledged(), indexName) - ) - ); + + final String finalCause = cause; + final ActionListener mappingTransformListener = ActionListener.wrap(transformedMappings -> { + final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest( + finalCause, + indexName, + request.index() + ).ackTimeout(request.timeout()) + .clusterManagerNodeTimeout(request.clusterManagerNodeTimeout()) + .settings(request.settings()) + .mappings(transformedMappings) + .aliases(request.aliases()) + .context(request.context()) + .waitForActiveShards(request.waitForActiveShards()); + + createIndexService.createIndex( + updateRequest, + ActionListener.map( + listener, + response -> new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcknowledged(), indexName) + ) + ); + }, listener::onFailure); + + mappingTransformerRegistry.applyTransformers(request.mappings(), null, mappingTransformListener); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index ed936822bfdcd..4c37592714185 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -50,7 +50,9 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.mapper.MappingTransformerRegistry; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -72,6 +74,7 @@ public class TransportPutMappingAction extends TransportClusterManagerNodeAction private final MetadataMappingService metadataMappingService; private final RequestValidators requestValidators; + private final MappingTransformerRegistry mappingTransformerRegistry; @Inject public TransportPutMappingAction( @@ -81,7 +84,8 @@ public TransportPutMappingAction( final MetadataMappingService 
metadataMappingService, final ActionFilters actionFilters, final IndexNameExpressionResolver indexNameExpressionResolver, - final RequestValidators requestValidators + final RequestValidators requestValidators, + final MappingTransformerRegistry mappingTransformerRegistry ) { super( PutMappingAction.NAME, @@ -94,6 +98,7 @@ public TransportPutMappingAction( ); this.metadataMappingService = metadataMappingService; this.requestValidators = Objects.requireNonNull(requestValidators); + this.mappingTransformerRegistry = mappingTransformerRegistry; } @Override @@ -132,7 +137,13 @@ protected void clusterManagerOperation( listener.onFailure(maybeValidationException.get()); return; } - performMappingUpdate(concreteIndices, request, listener, metadataMappingService); + + final ActionListener mappingTransformListener = ActionListener.wrap(transformedMapping -> { + request.source(transformedMapping, MediaTypeRegistry.JSON); + performMappingUpdate(concreteIndices, request, listener, metadataMappingService); + }, listener::onFailure); + + mappingTransformerRegistry.applyTransformers(request.source(), null, mappingTransformListener); } catch (IndexNotFoundException ex) { logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}]", Arrays.asList(request.indices())), ex); throw ex; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java index 66e2fe5c535db..a8002ec01d8cb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java @@ -44,16 +44,20 @@ import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.metadata.Template; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.index.mapper.MappingTransformerRegistry; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; +import reactor.util.annotation.NonNull; + /** * An action for putting a single component template into the cluster state * @@ -65,6 +69,7 @@ public class TransportPutComponentTemplateAction extends TransportClusterManager private final MetadataIndexTemplateService indexTemplateService; private final IndexScopedSettings indexScopedSettings; + private final MappingTransformerRegistry mappingTransformerRegistry; @Inject public TransportPutComponentTemplateAction( @@ -74,7 +79,8 @@ public TransportPutComponentTemplateAction( MetadataIndexTemplateService indexTemplateService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - IndexScopedSettings indexScopedSettings + IndexScopedSettings indexScopedSettings, + MappingTransformerRegistry mappingTransformerRegistry ) { super( PutComponentTemplateAction.NAME, @@ -87,6 +93,7 @@ public TransportPutComponentTemplateAction( ); this.indexTemplateService = indexTemplateService; this.indexScopedSettings = indexScopedSettings; + 
this.mappingTransformerRegistry = mappingTransformerRegistry; } @Override @@ -121,13 +128,37 @@ protected void clusterManagerOperation( template = new Template(settings, template.mappings(), template.aliases()); componentTemplate = new ComponentTemplate(template, componentTemplate.version(), componentTemplate.metadata()); } - indexTemplateService.putComponentTemplate( - request.cause(), - request.create(), - request.name(), - request.clusterManagerNodeTimeout(), - componentTemplate, - listener - ); + + final ActionListener mappingTransformListener = getMappingTransformListener(request, listener, componentTemplate); + + transformMapping(template, mappingTransformListener); + } + + private ActionListener getMappingTransformListener( + @NonNull final PutComponentTemplateAction.Request request, + @NonNull final ActionListener listener, + @NonNull final ComponentTemplate componentTemplate + ) { + return ActionListener.wrap(transformedMappings -> { + if (transformedMappings != null && componentTemplate.template() != null) { + componentTemplate.template().setMappings(new CompressedXContent(transformedMappings)); + } + indexTemplateService.putComponentTemplate( + request.cause(), + request.create(), + request.name(), + request.clusterManagerNodeTimeout(), + componentTemplate, + listener + ); + }, listener::onFailure); + } + + private void transformMapping(final Template template, @NonNull final ActionListener mappingTransformListener) { + if (template == null || template.mappings() == null) { + mappingTransformListener.onResponse(null); + } else { + mappingTransformerRegistry.applyTransformers(template.mappings().string(), null, mappingTransformListener); + } } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java index a5c3590a0a6d7..5554a540a57f2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java @@ -41,15 +41,20 @@ import org.opensearch.cluster.metadata.ComposableIndexTemplate; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.MetadataIndexTemplateService; +import org.opensearch.cluster.metadata.Template; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.inject.Inject; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.index.mapper.MappingTransformerRegistry; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; +import reactor.util.annotation.NonNull; + /** * An action for putting a composable index template into the cluster state * @@ -60,6 +65,7 @@ public class TransportPutComposableIndexTemplateAction extends TransportClusterM AcknowledgedResponse> { private final MetadataIndexTemplateService indexTemplateService; + private final MappingTransformerRegistry mappingTransformerRegistry; @Inject public TransportPutComposableIndexTemplateAction( @@ -68,7 +74,8 @@ public TransportPutComposableIndexTemplateAction( ThreadPool threadPool, MetadataIndexTemplateService indexTemplateService, ActionFilters actionFilters, 
- IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + MappingTransformerRegistry mappingTransformerRegistry ) { super( PutComposableIndexTemplateAction.NAME, @@ -80,6 +87,7 @@ public TransportPutComposableIndexTemplateAction( indexNameExpressionResolver ); this.indexTemplateService = indexTemplateService; + this.mappingTransformerRegistry = mappingTransformerRegistry; } @Override @@ -103,15 +111,35 @@ protected void clusterManagerOperation( final PutComposableIndexTemplateAction.Request request, final ClusterState state, final ActionListener listener + ) throws IOException { + final ComposableIndexTemplate indexTemplate = request.indexTemplate(); + + final ActionListener mappingTransformListener = ActionListener.wrap(transformedMappings -> { + if (transformedMappings != null && indexTemplate.template() != null) { + indexTemplate.template().setMappings(new CompressedXContent(transformedMappings)); + } + indexTemplateService.putIndexTemplateV2( + request.cause(), + request.create(), + request.name(), + request.clusterManagerNodeTimeout(), + indexTemplate, + listener + ); + }, listener::onFailure); + + transformMapping(indexTemplate, mappingTransformListener); + } + + private void transformMapping( + @NonNull final ComposableIndexTemplate indexTemplate, + @NonNull final ActionListener mappingTransformListener ) { - ComposableIndexTemplate indexTemplate = request.indexTemplate(); - indexTemplateService.putIndexTemplateV2( - request.cause(), - request.create(), - request.name(), - request.clusterManagerNodeTimeout(), - indexTemplate, - listener - ); + final Template template = indexTemplate.template(); + if (template == null || template.mappings() == null) { + mappingTransformListener.onResponse(null); + } else { + mappingTransformerRegistry.applyTransformers(template.mappings().string(), null, mappingTransformListener); + } } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index b9f27c00d0d98..2467fc0346313 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -49,6 +49,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.index.mapper.MappingTransformerRegistry; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -65,6 +66,7 @@ public class TransportPutIndexTemplateAction extends TransportClusterManagerNode private final MetadataIndexTemplateService indexTemplateService; private final IndexScopedSettings indexScopedSettings; + private final MappingTransformerRegistry mappingTransformerRegistry; @Inject public TransportPutIndexTemplateAction( @@ -74,7 +76,8 @@ public TransportPutIndexTemplateAction( MetadataIndexTemplateService indexTemplateService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - IndexScopedSettings indexScopedSettings + IndexScopedSettings indexScopedSettings, + MappingTransformerRegistry mappingTransformerRegistry ) { super( PutIndexTemplateAction.NAME, @@ -87,6 +90,7 @@ public TransportPutIndexTemplateAction( ); this.indexTemplateService = 
indexTemplateService; this.indexScopedSettings = indexScopedSettings; + this.mappingTransformerRegistry = mappingTransformerRegistry; } @Override @@ -118,28 +122,33 @@ protected void clusterManagerOperation( final Settings.Builder templateSettingsBuilder = Settings.builder(); templateSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX); indexScopedSettings.validate(templateSettingsBuilder.build(), true); // templates must be consistent with regards to dependencies - indexTemplateService.putTemplate( - new MetadataIndexTemplateService.PutRequest(cause, request.name()).patterns(request.patterns()) - .order(request.order()) - .settings(templateSettingsBuilder.build()) - .mappings(request.mappings()) - .aliases(request.aliases()) - .create(request.create()) - .clusterManagerTimeout(request.clusterManagerNodeTimeout()) - .version(request.version()), + final String finalCause = cause; + final ActionListener mappingTransformListener = ActionListener.wrap(transformedMappings -> { + indexTemplateService.putTemplate( + new MetadataIndexTemplateService.PutRequest(finalCause, request.name()).patterns(request.patterns()) + .order(request.order()) + .settings(templateSettingsBuilder.build()) + .mappings(transformedMappings) + .aliases(request.aliases()) + .create(request.create()) + .clusterManagerTimeout(request.clusterManagerNodeTimeout()) + .version(request.version()), - new MetadataIndexTemplateService.PutListener() { - @Override - public void onResponse(MetadataIndexTemplateService.PutResponse response) { - listener.onResponse(new AcknowledgedResponse(response.acknowledged())); - } + new MetadataIndexTemplateService.PutListener() { + @Override + public void onResponse(MetadataIndexTemplateService.PutResponse response) { + listener.onResponse(new AcknowledgedResponse(response.acknowledged())); + } - @Override - public void onFailure(Exception e) { - logger.debug(() -> new ParameterizedMessage("failed to put template [{}]", request.name()), e); - listener.onFailure(e); + @Override + public void onFailure(Exception e) { + logger.debug(() -> new ParameterizedMessage("failed to put template [{}]", request.name()), e); + listener.onFailure(e); + } } - } - ); + ); + }, listener::onFailure); + + mappingTransformerRegistry.applyTransformers(request.mappings(), null, mappingTransformListener); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index b032ade720612..de106d14c6fd9 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -1754,6 +1754,10 @@ public PutRequest mappings(String mappings) { return this; } + public String getMappings() { + return mappings; + } + public PutRequest aliases(Set aliases) { this.aliases.addAll(aliases); return this; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Template.java b/server/src/main/java/org/opensearch/cluster/metadata/Template.java index bd110c6af8975..be29a73e9c0ad 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Template.java @@ -94,7 +94,7 @@ public class Template extends AbstractDiffable