@@ -405,7 +405,7 @@ static ClusterState createClusterState(
: GeoIpDownloader.DATABASES_INDEX;
Index index = new Index(indexName, UUID.randomUUID().toString());
IndexMetadata.Builder idxMeta = IndexMetadata.builder(index.getName())
-.settings(indexSettings(IndexVersion.current(), 1, 0).put("index.uuid", index.getUUID()));
+.settings(indexSettings(IndexVersion.current(), index.getUUID(), 1, 0));
if (aliasGeoipDatabase) {
idxMeta.putAlias(AliasMetadata.builder(GeoIpDownloader.DATABASES_INDEX));
}
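The hunk above is the shape of the whole change: instead of building index settings and then appending the UUID by hand with put("index.uuid", ...), the UUID is passed straight into the indexSettings(...) test helper. A minimal sketch of what such an overload could look like, assuming the helper lives in the shared test framework (the class name, location, and exact signature below are inferred from the call sites in this diff, not confirmed against the PR):

import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexVersion;

// Hypothetical stand-in for the ESTestCase helpers; names are illustrative only.
final class IndexSettingsHelpers {

    // Pre-existing shape: created-version plus shard/replica counts.
    static Settings.Builder indexSettings(IndexVersion versionCreated, int shards, int replicas) {
        return Settings.builder()
            .put(IndexMetadata.SETTING_VERSION_CREATED, versionCreated.id())
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shards)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas);
    }

    // Overload matching the new call sites: the index UUID is folded in, so
    // callers no longer append "index.uuid" (IndexMetadata.SETTING_INDEX_UUID) themselves.
    static Settings.Builder indexSettings(IndexVersion versionCreated, String indexUuid, int shards, int replicas) {
        return indexSettings(versionCreated, shards, replicas).put(IndexMetadata.SETTING_INDEX_UUID, indexUuid);
    }
}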
@@ -20,6 +20,7 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.tests.index.RandomIndexWriter;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.index.Index;
@@ -86,7 +87,7 @@ public void testParentChild() throws IOException {

DirectoryReader indexReader = ElasticsearchDirectoryReader.wrap(
DirectoryReader.open(directory),
-new ShardId(new Index("foo", "_na_"), 1)
+new ShardId(new Index("foo", IndexMetadata.INDEX_UUID_NA_VALUE), 1)
);
// verify with all documents
testCase(new MatchAllDocsQuery(), indexReader, parent -> {
@@ -151,7 +152,7 @@ public void testParentChildTerms() throws IOException {

DirectoryReader indexReader = ElasticsearchDirectoryReader.wrap(
DirectoryReader.open(directory),
-new ShardId(new Index("foo", "_na_"), 1)
+new ShardId(new Index("foo", IndexMetadata.INDEX_UUID_NA_VALUE), 1)
);
// verify a terms-aggregation inside the parent-aggregation
testCaseTerms(new MatchAllDocsQuery(), indexReader, parent -> {
@@ -192,7 +193,7 @@ public void testTermsParentChildTerms() throws IOException {

DirectoryReader indexReader = ElasticsearchDirectoryReader.wrap(
DirectoryReader.open(directory),
-new ShardId(new Index("foo", "_na_"), 1)
+new ShardId(new Index("foo", IndexMetadata.INDEX_UUID_NA_VALUE), 1)
);
// verify a terms-aggregation inside the parent-aggregation which itself is inside a
// terms-aggregation on the child-documents
@@ -24,6 +24,7 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.tests.index.RandomIndexWriter;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.index.Index;
@@ -89,7 +90,7 @@ public void testParentChild() throws IOException {

DirectoryReader indexReader = ElasticsearchDirectoryReader.wrap(
DirectoryReader.open(directory),
-new ShardId(new Index("foo", "_na_"), 1)
+new ShardId(new Index("foo", IndexMetadata.INDEX_UUID_NA_VALUE), 1)
);
testCase(new MatchAllDocsQuery(), indexReader, child -> {
int expectedTotalChildren = 0;
@@ -128,7 +129,7 @@ public void testParentChildAsSubAgg() throws IOException {
try (
DirectoryReader indexReader = ElasticsearchDirectoryReader.wrap(
DirectoryReader.open(directory),
-new ShardId(new Index("foo", "_na_"), 1)
+new ShardId(new Index("foo", IndexMetadata.INDEX_UUID_NA_VALUE), 1)
)
) {
AggregationBuilder request = new TermsAggregationBuilder("t").field("kwd")
@@ -180,7 +181,7 @@ public void testBestDeferringCollectorWithSubAggOfChildrenAggNeedingScores() thr
try (
var indexReader = ElasticsearchDirectoryReader.wrap(
DirectoryReader.open(directory),
-new ShardId(new Index("foo", "_na_"), 1)
+new ShardId(new Index("foo", IndexMetadata.INDEX_UUID_NA_VALUE), 1)
)
) {
// invalid usage,
@@ -103,6 +103,6 @@ private static TestAnalysis createTestAnalysis() throws IOException {
.build();
Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build();

-return createTestAnalysis(new Index("test", "_na_"), nodeSettings, settings, new AnalysisICUPlugin());
+return createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), nodeSettings, settings, new AnalysisICUPlugin());
}
}
@@ -9,6 +9,7 @@

package org.elasticsearch.plugin.analysis.icu;

+import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.CharFilterFactory;
@@ -22,7 +23,11 @@

public class SimpleIcuAnalysisTests extends ESTestCase {
public void testDefaultsIcuAnalysis() throws IOException {
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisICUPlugin());
TestAnalysis analysis = createTestAnalysis(
new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE),
Settings.EMPTY,
new AnalysisICUPlugin()
);

TokenizerFactory tokenizerFactory = analysis.tokenizer.get("icu_tokenizer");
assertThat(tokenizerFactory, instanceOf(IcuTokenizerFactory.class));
@@ -17,6 +17,7 @@
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.TokenFilterFactory;
@@ -37,7 +38,7 @@ public void testDefaultUsage() throws Exception {
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.strength", "primary")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "FOO", "foo");
@@ -55,7 +56,7 @@ public void testBasicUsage() throws Exception {
.put("index.analysis.filter.myCollator.language", "tr")
.put("index.analysis.filter.myCollator.strength", "primary")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "I WİLL USE TURKİSH CASING", "ı will use turkish casıng");
@@ -71,7 +72,7 @@ public void testNormalization() throws IOException {
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.decomposition", "canonical")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "I W\u0049\u0307LL USE TURKİSH CASING", "ı will use turkish casıng");
@@ -87,7 +88,7 @@ public void testSecondaryStrength() throws IOException {
.put("index.analysis.filter.myCollator.strength", "secondary")
.put("index.analysis.filter.myCollator.decomposition", "no")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "TESTING", "testing");
@@ -104,7 +105,7 @@ public void testIgnorePunctuation() throws IOException {
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.alternate", "shifted")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "foo-bar", "foo bar");
@@ -122,7 +123,7 @@ public void testIgnoreWhitespace() throws IOException {
.put("index.analysis.filter.myCollator.alternate", "shifted")
.put("index.analysis.filter.myCollator.variableTop", " ")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "foo bar", "foobar");
@@ -140,7 +141,7 @@ public void testNumerics() throws IOException {
.put("index.analysis.filter.myCollator.language", "en")
.put("index.analysis.filter.myCollator.numeric", "true")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollation(filterFactory, "foobar-9", "foobar-10", -1);
@@ -157,7 +158,7 @@ public void testIgnoreAccentsButNotCase() throws IOException {
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.caseLevel", "true")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "résumé", "resume");
@@ -177,7 +178,7 @@ public void testUpperCaseFirst() throws IOException {
.put("index.analysis.filter.myCollator.strength", "tertiary")
.put("index.analysis.filter.myCollator.caseFirst", "upper")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollation(filterFactory, "Resume", "resume", -1);
@@ -202,7 +203,7 @@ public void testCustomRules() throws Exception {
.put("index.analysis.filter.myCollator.rules", tailoredRules)
.put("index.analysis.filter.myCollator.strength", "primary")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "Töne", "Toene");
@@ -218,7 +219,7 @@ public void testBasicCustomRules() throws Exception {
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.rules", "&a < g")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());

TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollation(filterFactory, "green", "bird", -1);
@@ -12,6 +12,7 @@
import com.ibm.icu.text.Normalizer2;

import org.apache.lucene.analysis.CharFilter;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.CharFilterFactory;
@@ -25,7 +26,7 @@
public class SimpleIcuNormalizerCharFilterTests extends ESTestCase {
public void testDefaultSetting() throws Exception {
Settings settings = Settings.builder().put("index.analysis.char_filter.myNormalizerChar.type", "icu_normalizer").build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());
CharFilterFactory charFilterFactory = analysis.charFilter.get("myNormalizerChar");

String input = "ʰ㌰゙5℃№㈱㌘,バッファーの正規化のテスト.㋐㋑㋒㋓㋔カキクケコザジズゼゾg̈각/각நிเกषिchkʷक्षि";
@@ -49,7 +50,7 @@ public void testNameAndModeSetting() throws Exception {
.put("index.analysis.char_filter.myNormalizerChar.name", "nfkc")
.put("index.analysis.char_filter.myNormalizerChar.mode", "decompose")
.build();
-TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
+TestAnalysis analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisICUPlugin());
CharFilterFactory charFilterFactory = analysis.charFilter.get("myNormalizerChar");

String input = "ʰ㌰゙5℃№㈱㌘,バッファーの正規化のテスト.㋐㋑㋒㋓㋔カキクケコザジズゼゾg̈각/각நிเกषिchkʷक्षि";
@@ -290,7 +290,12 @@ private static TestAnalysis createTestAnalysis() throws IOException {
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.build();
Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build();
-return createTestAnalysis(new Index("test", "_na_"), nodeSettings, settings, new AnalysisKuromojiPlugin());
+return createTestAnalysis(
+    new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE),
+    nodeSettings,
+    settings,
+    new AnalysisKuromojiPlugin()
+);
}

public static void assertSimpleTSOutput(TokenStream stream, String[] expected) throws IOException {
@@ -38,7 +38,7 @@ public void setup() throws IOException {
.loadFromStream(yaml, getClass().getResourceAsStream(yaml), false)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.build();
-this.analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisPhoneticPlugin());
+this.analysis = createTestAnalysis(new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE), settings, new AnalysisPhoneticPlugin());
}

public void testPhoneticTokenFilterFactory() throws IOException {
@@ -9,6 +9,7 @@

package org.elasticsearch.plugin.analysis.smartcn;

+import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.TokenizerFactory;
@@ -21,7 +22,11 @@

public class SimpleSmartChineseAnalysisTests extends ESTestCase {
public void testDefaultsIcuAnalysis() throws IOException {
-final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisSmartChinesePlugin());
+final TestAnalysis analysis = createTestAnalysis(
+    new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE),
+    Settings.EMPTY,
+    new AnalysisSmartChinesePlugin()
+);
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("smartcn_tokenizer");
MatcherAssert.assertThat(tokenizerFactory, instanceOf(SmartChineseTokenizerTokenizerFactory.class));
}
@@ -42,9 +42,10 @@ protected Map<String, Class<?>> getTokenFilters() {

public void testThreadSafety() throws IOException {
// TODO: is this the right boilerplate? I forked this out of TransportAnalyzeAction.java:
-Settings settings = indexSettings(IndexVersion.current(), 1, 0).put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
-    .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
-    .build();
+Settings settings = indexSettings(IndexVersion.current(), UUIDs.randomBase64UUID(), 1, 0).put(
+    Environment.PATH_HOME_SETTING.getKey(),
+    createTempDir().toString()
+).build();
Environment environment = TestEnvironment.newEnvironment(settings);
IndexMetadata metadata = IndexMetadata.builder(IndexMetadata.INDEX_UUID_NA_VALUE).settings(settings).build();
IndexSettings indexSettings = new IndexSettings(metadata, Settings.EMPTY);
@@ -11,6 +11,7 @@

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.pl.PolishAnalyzer;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.pl.PolishStemTokenFilterFactory;
@@ -24,7 +25,11 @@

public class PolishAnalysisTests extends ESTestCase {
public void testDefaultsPolishAnalysis() throws IOException {
-final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisStempelPlugin());
+final TestAnalysis analysis = createTestAnalysis(
+    new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE),
+    Settings.EMPTY,
+    new AnalysisStempelPlugin()
+);
TokenFilterFactory tokenizerFactory = analysis.tokenFilter.get("polish_stem");
MatcherAssert.assertThat(tokenizerFactory, instanceOf(PolishStemTokenFilterFactory.class));

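All of the "_na_" hunks above are the same mechanical substitution: the magic string becomes the named IndexMetadata constant, with no behavior change intended. A small self-contained sketch of why that holds, assuming INDEX_UUID_NA_VALUE is the usual "_na_" sentinel that IndexMetadata defines for indices without a real UUID:

import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.index.Index;

final class UuidSentinelDemo {
    public static void main(String[] args) {
        Index before = new Index("test", "_na_");                           // old spelling
        Index after = new Index("test", IndexMetadata.INDEX_UUID_NA_VALUE); // new spelling
        // Index identity is (name, uuid), so the rename is purely cosmetic
        // as long as the constant's value really is "_na_" (assumed above).
        assert before.equals(after);
    }
}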