
Commit

PHOENIX-7474 Migrate IndexTool tables and make sure they are created
richardantal committed Dec 3, 2024
1 parent a75c4dc commit 7c2b511
Showing 11 changed files with 691 additions and 82 deletions.
File: org/apache/phoenix/mapreduce/index/IndexToolTableUtil.java (new file)
@@ -0,0 +1,144 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.mapreduce.index;

import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.UUID;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.util.SchemaUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME;

/**
 * Utility class that creates the IndexTool output and result tables under the SYSTEM schema,
 * and migrates data from the legacy (default-namespace) tables when upgrading.
 */
public class IndexToolTableUtil extends Configured {

    private static final Logger LOGGER = LoggerFactory.getLogger(IndexToolTableUtil.class);

    public final static String OUTPUT_TABLE_NAME = "PHOENIX_INDEX_TOOL";
    public static String SYSTEM_OUTPUT_TABLE_NAME = SchemaUtil.getTableName(SYSTEM_SCHEMA_NAME,
            OUTPUT_TABLE_NAME);

    public final static String RESULT_TABLE_NAME = "PHOENIX_INDEX_TOOL_RESULT";
    public static String SYSTEM_RESULT_TABLE_NAME = SchemaUtil.getTableName(SYSTEM_SCHEMA_NAME,
            RESULT_TABLE_NAME);
    public static void setIndexToolTableName(Connection connection) throws Exception {
        ConnectionQueryServices queryServices =
                connection.unwrap(PhoenixConnection.class).getQueryServices();
        if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM,
                queryServices.getConfiguration())) {
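            // With namespace mapping the SYSTEM schema maps to the SYSTEM namespace, so the
            // separator in the physical table name changes from "." to ":".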
            SYSTEM_OUTPUT_TABLE_NAME = SYSTEM_OUTPUT_TABLE_NAME.replace(
                    QueryConstants.NAME_SEPARATOR, QueryConstants.NAMESPACE_SEPARATOR);
            SYSTEM_RESULT_TABLE_NAME = SYSTEM_RESULT_TABLE_NAME.replace(
                    QueryConstants.NAME_SEPARATOR, QueryConstants.NAMESPACE_SEPARATOR);
        }
    }

    public static Table createResultTable(Connection connection)
            throws IOException, SQLException {
        ConnectionQueryServices queryServices =
                connection.unwrap(PhoenixConnection.class).getQueryServices();
        try (Admin admin = queryServices.getAdmin()) {
            TableName resultTableName = TableName.valueOf(SYSTEM_RESULT_TABLE_NAME);
            return createTable(admin, resultTableName);
        }
    }

    public static Table createOutputTable(Connection connection)
            throws IOException, SQLException {
        ConnectionQueryServices queryServices =
                connection.unwrap(PhoenixConnection.class).getQueryServices();
        try (Admin admin = queryServices.getAdmin()) {
            TableName outputTableName = TableName.valueOf(SYSTEM_OUTPUT_TABLE_NAME);
            return createTable(admin, outputTableName);
        }
    }

    private static Table createTable(Admin admin, TableName tableName) throws IOException {
        if (!admin.tableExists(tableName)) {
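            // The column family carries a TTL (MetaDataProtocol.DEFAULT_LOG_TTL), so old
            // verification rows expire instead of accumulating.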
            ColumnFamilyDescriptor columnDescriptor = ColumnFamilyDescriptorBuilder
                    .newBuilder(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)
                    .setTimeToLive(MetaDataProtocol.DEFAULT_LOG_TTL)
                    .build();
            TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
                    .setColumnFamily(columnDescriptor).build();
            try {
                admin.createTable(tableDescriptor);
            } catch (TableExistsException e) {
                // Another client created the table concurrently; safe to ignore.
                LOGGER.warn("Table exists, ignoring", e);
            }
        }
        return admin.getConnection().getTable(tableName);
    }

    public static void createNewIndexToolTables(Connection connection) throws Exception {
        setIndexToolTableName(connection);
        migrateTable(connection, OUTPUT_TABLE_NAME);
        migrateTable(connection, RESULT_TABLE_NAME);
    }

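    /**
     * Moves a legacy IndexTool table from the default namespace under the SYSTEM schema via
     * snapshot + clone, or simply creates the new SYSTEM table if there is nothing to migrate.
     */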
    private static void migrateTable(Connection connection, String tableName) throws Exception {
        if (!tableName.equals(OUTPUT_TABLE_NAME) && !tableName.equals(RESULT_TABLE_NAME)) {
            LOGGER.warn("Not migrating table {}; only the PHOENIX_INDEX_TOOL tables are "
                    + "handled here.", tableName);
        } else {
            ConnectionQueryServices queryServices =
                    connection.unwrap(PhoenixConnection.class).getQueryServices();
            try (Admin admin = queryServices.getAdmin()) {
                TableName oldTableName = TableName.valueOf(tableName);
                String newTableNameString = tableName.equals(OUTPUT_TABLE_NAME)
                        ? SYSTEM_OUTPUT_TABLE_NAME : SYSTEM_RESULT_TABLE_NAME;
                TableName newTableName = TableName.valueOf(newTableNameString);

                if (admin.tableExists(oldTableName)) {
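                    // HBase cannot rename a table in place: snapshot the old table, clone the
                    // snapshot to the new SYSTEM name, then drop the snapshot and the old table.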
                    String snapshotName = tableName + "_" + UUID.randomUUID();
                    admin.disableTable(oldTableName);
                    admin.snapshot(snapshotName, oldTableName);
                    admin.cloneSnapshot(snapshotName, newTableName);
                    admin.deleteSnapshot(snapshotName);
                    admin.deleteTable(oldTableName);
                } else {
                    createTable(admin, newTableName);
                }
            }
        }
    }
}
File: org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -323,6 +323,8 @@
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
import org.apache.phoenix.thirdparty.com.google.common.collect.Sets;

import org.apache.phoenix.mapreduce.index.IndexToolTableUtil;

public class ConnectionQueryServicesImpl extends DelegateQueryServices implements ConnectionQueryServices {
private static final Logger LOGGER =
LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
@@ -3844,6 +3846,12 @@ Collections.<Class<? extends Exception>> singletonList(
LOGGER.error("Upgrade is required. Must run 'EXECUTE UPGRADE' "
+ "before any other command");
}
try {
    // Check whether the old PHOENIX_INDEX_TOOL tables exist; if they do, move their data
    // to the new tables under SYSTEM, otherwise simply create the new tables.
    IndexToolTableUtil.createNewIndexToolTables(metaConnection);
} catch (Exception e) {
    // The upgrade itself must not fail because of the IndexTool tables; log and move on.
    LOGGER.warn("Failed to create/migrate the PHOENIX_INDEX_TOOL tables", e);
}
}
success = true;
} catch (RetriableUpgradeException e) {
File: org/apache/phoenix/mapreduce/index/IndexVerificationOutputRepository.java
@@ -19,20 +19,12 @@

import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting;

import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
import org.apache.phoenix.hbase.index.table.HTableFactory;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -60,7 +52,7 @@ public class IndexVerificationOutputRepository implements AutoCloseable {
IndexTool.IndexDisableLoggingType.NONE;
private boolean shouldLogBeyondMaxLookback = true;

public final static String OUTPUT_TABLE_NAME = "PHOENIX_INDEX_TOOL";
public final static String OUTPUT_TABLE_NAME = IndexToolTableUtil.SYSTEM_OUTPUT_TABLE_NAME;
public final static byte[] OUTPUT_TABLE_NAME_BYTES = Bytes.toBytes(OUTPUT_TABLE_NAME);
public final static byte[] OUTPUT_TABLE_COLUMN_FAMILY = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES;

@@ -177,26 +169,7 @@ private static byte[] generatePartialOutputTableRowKey(long ts, byte[] indexTabl
}

public void createOutputTable(Connection connection) throws IOException, SQLException {
ConnectionQueryServices queryServices = connection.unwrap(PhoenixConnection.class).getQueryServices();
try (Admin admin = queryServices.getAdmin()) {
TableName outputTableName = TableName.valueOf(OUTPUT_TABLE_NAME);
if (!admin.tableExists(outputTableName)) {
ColumnFamilyDescriptor columnDescriptor =
ColumnFamilyDescriptorBuilder
.newBuilder(OUTPUT_TABLE_COLUMN_FAMILY)
.setTimeToLive(MetaDataProtocol.DEFAULT_LOG_TTL)
.build();
TableDescriptor tableDescriptor = TableDescriptorBuilder
.newBuilder(TableName.valueOf(OUTPUT_TABLE_NAME))
.setColumnFamily(columnDescriptor).build();
try {
admin.createTable(tableDescriptor);
} catch (TableExistsException e) {
LOGGER.warn("Table exists, ignoring", e);
}
outputTable = admin.getConnection().getTable(outputTableName);
}
}
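// Creating the output table (including its migration to the SYSTEM schema) is now
// centralized in IndexToolTableUtil.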
outputTable = IndexToolTableUtil.createOutputTable(connection);
}

@VisibleForTesting
File: org/apache/phoenix/mapreduce/index/IndexVerificationResultRepository.java
@@ -19,27 +19,18 @@

import org.apache.hadoop.hbase.Cell;

import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.IndexToolVerificationResult;
import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
import org.apache.phoenix.hbase.index.table.HTableFactory;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.util.ByteUtil;
import org.slf4j.Logger;
@@ -58,7 +49,7 @@ public class IndexVerificationResultRepository implements AutoCloseable {
private Table indexTable;
public static final String ROW_KEY_SEPARATOR = "|";
public static final byte[] ROW_KEY_SEPARATOR_BYTE = Bytes.toBytes(ROW_KEY_SEPARATOR);
public final static String RESULT_TABLE_NAME = "PHOENIX_INDEX_TOOL_RESULT";
public final static String RESULT_TABLE_NAME = IndexToolTableUtil.SYSTEM_RESULT_TABLE_NAME;
public final static byte[] RESULT_TABLE_NAME_BYTES = Bytes.toBytes(RESULT_TABLE_NAME);
public final static byte[] RESULT_TABLE_COLUMN_FAMILY = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES;
public final static String SCANNED_DATA_ROW_COUNT = "ScannedDataRowCount";
@@ -163,26 +154,7 @@ public IndexVerificationResultRepository(byte[] indexName,
}

public void createResultTable(Connection connection) throws IOException, SQLException {
ConnectionQueryServices queryServices = connection.unwrap(PhoenixConnection.class).getQueryServices();
try (Admin admin = queryServices.getAdmin()) {
TableName resultTableName = TableName.valueOf(RESULT_TABLE_NAME);
if (!admin.tableExists(resultTableName)) {
ColumnFamilyDescriptor columnDescriptor =
ColumnFamilyDescriptorBuilder
.newBuilder(RESULT_TABLE_COLUMN_FAMILY)
.setTimeToLive(MetaDataProtocol.DEFAULT_LOG_TTL)
.build();
TableDescriptor tableDescriptor =
TableDescriptorBuilder.newBuilder(resultTableName)
.setColumnFamily(columnDescriptor).build();
try {
admin.createTable(tableDescriptor);
} catch (TableExistsException e) {
LOGGER.warn("Table exists, ignoring", e);
}
resultTable = admin.getConnection().getTable(resultTableName);
}
}
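// Creating the result table (including its migration to the SYSTEM schema) is now
// centralized in IndexToolTableUtil.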
resultTable = IndexToolTableUtil.createResultTable(connection);
}

private static byte[] generatePartialResultTableRowKey(long ts, byte[] indexTableName) {

Large diffs are not rendered by default.

File: org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -217,8 +217,8 @@ public void initUsersAndTables() {
view2TableName = tableName + "_V2";
}

private static void enablePhoenixHBaseAuthorization(Configuration config,
boolean useCustomAccessController) {
static void enablePhoenixHBaseAuthorization(Configuration config,
boolean useCustomAccessController) {
config.set("hbase.superuser", SUPER_USER + "," + "superUser2");
config.set("hbase.security.authorization", Boolean.TRUE.toString());
config.set("hbase.security.exec.permission.checks", Boolean.TRUE.toString());
@@ -242,11 +242,11 @@ private static void enablePhoenixHBaseAuthorization(Configuration config,
config.set("hbase.regionserver.wal.codec", "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec");
}

private static void configureNamespacesOnServer(Configuration conf, boolean isNamespaceMapped) {
static void configureNamespacesOnServer(Configuration conf, boolean isNamespaceMapped) {
conf.set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped));
}

private static void configureStatsConfigurations(Configuration conf) {
static void configureStatsConfigurations(Configuration conf) {
conf.set(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
conf.set(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
conf.set(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, Long.toString(5));
File: org/apache/phoenix/end2end/IndexToolIT.java
@@ -847,10 +847,20 @@ private static List<String> getArgList (boolean useSnapshot, String schemaName,
}

private static List<String> getArgList (boolean useSnapshot, String schemaName,
String dataTable, String indxTable, String tenantId,
IndexTool.IndexVerifyType verifyType, Long startTime, Long endTime,
IndexTool.IndexDisableLoggingType disableLoggingType,
Long incrementalVerify, boolean useIndexTableAsSource) {
return getArgList(useSnapshot, schemaName, dataTable, indxTable, tenantId, verifyType, startTime,
endTime, disableLoggingType, incrementalVerify, useIndexTableAsSource,
"/tmp/" + UUID.randomUUID().toString());
}

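// This overload lets callers supply the IndexTool output path ("-op") explicitly instead
// of the random /tmp directory used by the overload above.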
private static List<String> getArgList (boolean useSnapshot, String schemaName,
String dataTable, String indxTable, String tenantId,
IndexTool.IndexVerifyType verifyType, Long startTime, Long endTime,
IndexTool.IndexDisableLoggingType disableLoggingType,
Long incrementalVerify, boolean useIndexTableAsSource,
String outputPath) {
List<String> args = Lists.newArrayList();
if (schemaName != null) {
args.add("--schema=" + schemaName);
@@ -895,7 +905,7 @@ private static List<String> getArgList (boolean useSnapshot, String schemaName,
}

args.add("-op");
args.add("/tmp/" + UUID.randomUUID().toString());
args.add(outputPath);
return args;
}

@@ -914,6 +924,14 @@ public static String[] getArgValues(boolean useSnapshot, String schemaName,
return args.toArray(new String[0]);
}

public static String[] getArgValues(boolean useSnapshot, String schemaName,
String dataTable, String indexTable, String tenantId, IndexTool.IndexVerifyType verifyType,
IndexTool.IndexDisableLoggingType disableLoggingType, String outputPath) {
List<String> args = getArgList(useSnapshot, schemaName, dataTable, indexTable,
tenantId, verifyType, null, null, disableLoggingType, null, false, outputPath);
return args.toArray(new String[0]);
}

public static String [] getArgValues(boolean useSnapshot, String schemaName,
String dataTable, String indexTable, String tenantId,
IndexTool.IndexVerifyType verifyType, Long startTime, Long endTime) {
@@ -1010,8 +1028,31 @@ public static IndexTool runIndexTool(Configuration conf, boolean useSnapshot, St
IndexTool indexingTool = new IndexTool();
conf.set(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString());
indexingTool.setConf(conf);
final String[] cmdArgs = getArgValues(useSnapshot, schemaName, dataTableName,
indexTableName, tenantId, verifyType, disableLoggingType);
boolean additionalArgsContainPath = false;
boolean nextArgIsPath = false;
String path = "";
List<String> remainingArgs = Lists.newArrayList();
for (String arg : additionalArgs) {
    if (nextArgIsPath) {
        // The argument immediately following -op/-output-path is the output path itself.
        path = arg;
        nextArgIsPath = false;
        additionalArgsContainPath = true;
    } else if (arg.equals("-op") || arg.equals("-output-path")) {
        nextArgIsPath = true;
    } else {
        remainingArgs.add(arg);
    }
}
additionalArgs = remainingArgs.toArray(new String[0]);

String[] cmdArgs;
if (additionalArgsContainPath) {
    cmdArgs = getArgValues(useSnapshot, schemaName, dataTableName,
            indexTableName, tenantId, verifyType, disableLoggingType, path);
} else {
    cmdArgs = getArgValues(useSnapshot, schemaName, dataTableName,
            indexTableName, tenantId, verifyType, disableLoggingType);
}
List<String> cmdArgList = new ArrayList<>(Arrays.asList(cmdArgs));
cmdArgList.addAll(Arrays.asList(additionalArgs));
LOGGER.info("Running IndexTool with {}", Arrays.toString(cmdArgList.toArray()), new Exception("Stack Trace"));
File: org/apache/phoenix/end2end/LoadSystemTableSnapshotBase.java
@@ -74,22 +74,13 @@ public abstract class LoadSystemTableSnapshotBase extends BaseTest {
private static final Logger LOGGER = LoggerFactory.getLogger(
LoadSystemTableSnapshotBase.class);

public static final String SNAPSHOT_DIR = "snapshots4_7/";
public static String SNAPSHOT_DIR;
public static String rootDir;

private static final HashMap<String, String> SNAPSHOTS_TO_LOAD;
private static HashMap<String, String> SNAPSHOTS_TO_LOAD;

public static final byte[] MUTEX_LOCKED = "MUTEX_LOCKED".getBytes(StandardCharsets.UTF_8);

static {
SNAPSHOTS_TO_LOAD = new HashMap<>();
//Add any HBase tables, including Phoenix System tables

SNAPSHOTS_TO_LOAD.put("SYSTEM.CATALOG_SNAPSHOT", "SYSTEM.CATALOG");
SNAPSHOTS_TO_LOAD.put("SYSTEM.FUNCTION_SNAPSHOT", "SYSTEM.FUNCTION");
SNAPSHOTS_TO_LOAD.put("SYSTEM.SEQUENCE_SNAPSHOT", "SYSTEM.SEQUENCE");
SNAPSHOTS_TO_LOAD.put("SYSTEM.STATS_SNAPSHOT", "SYSTEM.STATS");
}

private static void decompress(String in, File out) throws IOException {
try (TarArchiveInputStream fin = new TarArchiveInputStream(new FileInputStream(in))){
@@ -109,11 +100,24 @@ private static void decompress(String in, File out) throws IOException {
}

public static synchronized void setupCluster(boolean createBlockUpgradeMutex) throws Exception {
//Add any HBase tables, including Phoenix System tables
HashMap<String, String> snapshotsToLoad = new HashMap<>();
snapshotsToLoad.put("SYSTEM.CATALOG_SNAPSHOT", "SYSTEM.CATALOG");
snapshotsToLoad.put("SYSTEM.FUNCTION_SNAPSHOT", "SYSTEM.FUNCTION");
snapshotsToLoad.put("SYSTEM.SEQUENCE_SNAPSHOT", "SYSTEM.SEQUENCE");
snapshotsToLoad.put("SYSTEM.STATS_SNAPSHOT", "SYSTEM.STATS");

setupCluster(createBlockUpgradeMutex, "snapshots47.tar.gz", "snapshots4_7/", snapshotsToLoad, "true");
}

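// Generalized setup: callers choose the snapshot tarball, the extraction directory, the
// snapshot-to-table mapping, and the namespace-mapping setting.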
public static synchronized void setupCluster(boolean createBlockUpgradeMutex, String tarName,
        String snapshotDir, HashMap<String, String> snapshotsToLoad,
        String nameSpaceMapping) throws Exception {
SNAPSHOT_DIR = snapshotDir;
SNAPSHOTS_TO_LOAD = snapshotsToLoad;
Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, nameSpaceMapping);
Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, nameSpaceMapping);

//Start minicluster without Phoenix first
checkClusterInitialized(new ReadOnlyProps(serverProps.entrySet().iterator()));
@@ -122,7 +126,7 @@ public static synchronized void setupCluster(boolean createBlockUpgradeMutex) th
.getResource(SNAPSHOT_DIR);

// extract the tar
File archive = new File(folderUrl.getFile() + "snapshots47.tar.gz");
File archive = new File(folderUrl.getFile() + tarName);
File destination = new File(folderUrl.getFile());

decompress(archive.toString(), destination);
File: org/apache/phoenix/end2end/UpgradeIndexToolTablesIT.java (new file)
@@ -0,0 +1,96 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.end2end;


import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.mapreduce.index.IndexToolTableUtil;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SchemaUtil;
import org.junit.Before;
import org.junit.Test;

import static org.apache.phoenix.mapreduce.index.IndexToolTableUtil.RESULT_TABLE_NAME;
import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

public class UpgradeIndexToolTablesIT extends LoadSystemTableSnapshotBase {
protected String nameSpaceMapping = "true";

@Before
public synchronized void doSetup() throws Exception {
setupCluster(nameSpaceMapping);
}

public synchronized void setupCluster(String nameSpaceMappingEnabled) throws Exception {
HashMap<String, String> snapshotsToLoad = new HashMap<>();
snapshotsToLoad.put("phoenixtoolresultsnapshot", "PHOENIX_INDEX_TOOL_RESULT");
setupCluster(false, "indexToolsnapshot.tar.gz", "indexToolResultSnapshot/", snapshotsToLoad, nameSpaceMappingEnabled);
}

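// The loaded snapshot contains a legacy PHOENIX_INDEX_TOOL_RESULT table outside the
// SYSTEM schema; the upgrade path is expected to move it under SYSTEM.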
@Test
public void testPhoenixUpgradeIndexToolTables() throws Exception {
try (Admin admin = utility.getAdmin()) {
// we load the RESULT_TABLE_NAME from snapshot
assertTrue(admin.tableExists(TableName.valueOf(IndexToolTableUtil.RESULT_TABLE_NAME)));
assertFalse(admin.tableExists(TableName.valueOf(IndexToolTableUtil.SYSTEM_RESULT_TABLE_NAME)));
// we don't load the OUTPUT_TABLE_NAME
assertFalse(admin.tableExists(TableName.valueOf(IndexToolTableUtil.OUTPUT_TABLE_NAME)));
assertFalse(admin.tableExists(TableName.valueOf(IndexToolTableUtil.SYSTEM_OUTPUT_TABLE_NAME)));
}

Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, nameSpaceMapping);
Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, nameSpaceMapping);


// Now we can start Phoenix; initializing the driver runs the upgrade code path
setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
new ReadOnlyProps(clientProps.entrySet().iterator()));

// Check the IndexTool tables after the upgrade
try (Admin admin = utility.getAdmin()) {
assertFalse(admin.tableExists(TableName.valueOf(IndexToolTableUtil.OUTPUT_TABLE_NAME)));
assertFalse(admin.tableExists(TableName.valueOf(IndexToolTableUtil.RESULT_TABLE_NAME)));
assertTrue(admin.tableExists(TableName.valueOf(IndexToolTableUtil.SYSTEM_OUTPUT_TABLE_NAME)));
assertTrue(admin.tableExists(TableName.valueOf(IndexToolTableUtil.SYSTEM_RESULT_TABLE_NAME)));
}

String tableName = SchemaUtil.getTableName(SYSTEM_SCHEMA_NAME, RESULT_TABLE_NAME);
if (nameSpaceMapping.equals("true")) {
assertEquals(IndexToolTableUtil.SYSTEM_RESULT_TABLE_NAME, tableName.replace(QueryConstants.NAME_SEPARATOR,
QueryConstants.NAMESPACE_SEPARATOR));
} else {
assertEquals(IndexToolTableUtil.SYSTEM_RESULT_TABLE_NAME, tableName);
}

}

}
File: org/apache/phoenix/end2end/UpgradeIndexToolTablesNameSpaceMappingDisabledIT.java (new file)
@@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.end2end;

import org.junit.Before;


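/**
 * Re-runs UpgradeIndexToolTablesIT with namespace mapping disabled, so the migrated tables
 * keep the dot-separated SYSTEM.&lt;table&gt; physical name.
 */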
public class UpgradeIndexToolTablesNameSpaceMappingDisabledIT extends UpgradeIndexToolTablesIT {

@Override
@Before
public synchronized void doSetup() throws Exception {
nameSpaceMapping = "false";
setupCluster(nameSpaceMapping);
}
}
Binary file not shown.
