6 changes: 6 additions & 0 deletions exec/java-exec/pom.xml
@@ -626,6 +626,12 @@
      <artifactId>swagger-jaxrs2-servlet-initializer-v2</artifactId>
      <version>${swagger.version}</version>
    </dependency>

    <dependency>
      <groupId>com.github.ben-manes.caffeine</groupId>
      <artifactId>caffeine</artifactId>
      <version>2.9.3</version>
    </dependency>
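    <!-- Note: the 2.9.x line above is presumably pinned for Java 8
         compatibility; Caffeine 3.x requires Java 11+. -->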
  </dependencies>

  <profiles>
103 changes: 103 additions & 0 deletions exec/java-exec/src/main/java/org/apache/drill/exec/cache/CustomCacheManager.java
@@ -0,0 +1,103 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.cache;

import java.util.concurrent.TimeUnit;

import org.apache.calcite.rel.RelNode;
import org.apache.drill.common.config.DrillConfig;
import org.apache.drill.exec.physical.PhysicalPlan;
import org.apache.drill.exec.planner.sql.handlers.DefaultSqlHandler.CacheKey;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

public class CustomCacheManager {
  private static final Logger logger = LoggerFactory.getLogger(CustomCacheManager.class);

  private static Cache<String, PhysicalPlan> queryCache;
  private static Cache<CacheKey, RelNode> transformCache;

  private static int queryMaxEntries;
  private static int queryTtlMinutes;
  private static int transformMaxEntries;
  private static int transformTtlMinutes;

  static {
    loadConfig();
  }

  private static void loadConfig() {
    DrillConfig config = DrillConfig.create();

    queryMaxEntries = getConfigInt(config, "planner.query.cache.max_entries_amount", 100);
    queryTtlMinutes = getConfigInt(config, "planner.query.cache.plan_cache_ttl_minutes", 300);
    transformMaxEntries = getConfigInt(config, "planner.transform.cache.max_entries_amount", 100);
    transformTtlMinutes = getConfigInt(config, "planner.transform.cache.plan_cache_ttl_minutes", 300);

    // Bounded, TTL-based caches; recordStats() enables the hit/miss counters
    // surfaced by logCacheStats() below.
    queryCache = Caffeine.newBuilder()
        .maximumSize(queryMaxEntries)
        .expireAfterWrite(queryTtlMinutes, TimeUnit.MINUTES)
        .recordStats()
        .build();

    transformCache = Caffeine.newBuilder()
        .maximumSize(transformMaxEntries)
        .expireAfterWrite(transformTtlMinutes, TimeUnit.MINUTES)
        .recordStats()
        .build();
  }

  private static int getConfigInt(DrillConfig config, String path, int defaultValue) {
    boolean pathFound = config.hasPath(path);
    int value = pathFound ? config.getInt(path) : defaultValue;
    if (pathFound) {
      logger.info("Using configured value for {}: {}", path, value);
    } else {
      logger.info("Using default value for {}: {}", path, defaultValue);
    }
    return value;
  }
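
  // The config keys above resolve through Drill's normal config stack, so
  // they can be overridden in drill-override.conf. Sketch of an override
  // (assumed HOCON syntax, illustrative values):
  //   planner.query.cache.max_entries_amount: 200
  //   planner.query.cache.plan_cache_ttl_minutes: 60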

  public static PhysicalPlan getQueryPlan(String sql) {
    return queryCache.getIfPresent(sql);
  }

  public static void putQueryPlan(String sql, PhysicalPlan plan) {
    queryCache.put(sql, plan);
  }

  public static RelNode getTransformedPlan(CacheKey key) {
    return transformCache.getIfPresent(key);
  }

  public static void putTransformedPlan(CacheKey key, RelNode plan) {
    transformCache.put(key, plan);
  }

  public static void logCacheStats() {
    logger.info("Query Cache Stats: {}", queryCache.stats());
    logger.info("Query Cache Size: {}", queryCache.estimatedSize());

    logger.info("Transform Cache Stats: {}", transformCache.stats());
    logger.info("Transform Cache Size: {}", transformCache.estimatedSize());
  }
}
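
// Usage sketch (illustration only, not part of this PR; planFromScratch() is
// a hypothetical stand-in for the real planning path):
//
//   PhysicalPlan plan = CustomCacheManager.getQueryPlan(sql);
//   if (plan == null) {
//     plan = planFromScratch(sql);
//     CustomCacheManager.putQueryPlan(sql, plan);
//   }
//   CustomCacheManager.logCacheStats();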
18 changes: 18 additions & 0 deletions exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
@@ -132,6 +132,20 @@ public class PlannerSettings implements Context{
  public static final String UNIONALL_DISTRIBUTE_KEY = "planner.enable_unionall_distribute";
  public static final BooleanValidator UNIONALL_DISTRIBUTE = new BooleanValidator(UNIONALL_DISTRIBUTE_KEY, null);

  public static final BooleanValidator PLAN_CACHE = new BooleanValidator("planner.cache.enable",
      new OptionDescription("Enables caching of generated query plans in memory, so repeated queries can bypass the planning phase and execute faster."));

  // TTL and max-entries are only settable in config: changing them at runtime
  // would require a pub-sub mechanism to recreate the cache on value change.
  // public static final RangeLongValidator PLAN_CACHE_TTL = new RangeLongValidator("planner.cache.ttl_minutes",
  //     0, Long.MAX_VALUE,
  //     new OptionDescription("Time-to-live for cached query plans in minutes. Plans older than this are evicted. Default is 0 (disabled)."));
  // public static final RangeLongValidator MAX_CACHE_ENTRIES = new RangeLongValidator("planner.cache.max_entries",
  //     1, Long.MAX_VALUE,
  //     new OptionDescription("Maximum total number of entries for cached query plans. When exceeded, least recently used plans are evicted."));

  // ------------------------------------------- Index planning related options BEGIN --------------------------------------------------------------
  public static final String USE_SIMPLE_OPTIMIZER_KEY = "planner.use_simple_optimizer";
  public static final BooleanValidator USE_SIMPLE_OPTIMIZER = new BooleanValidator(USE_SIMPLE_OPTIMIZER_KEY,
@@ -416,6 +430,10 @@ public boolean isUnionAllDistributeEnabled() {
    return options.getOption(UNIONALL_DISTRIBUTE);
  }

  public boolean isPlanCacheEnabled() {
    return options.getOption(PLAN_CACHE);
  }
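
  // Sketch (assumption, not wired up in this diff): a caller such as
  // DrillSqlWorker could gate plan-cache lookups on this option, e.g.
  //   if (context.getPlannerSettings().isPlanCacheEnabled()) {
  //     PhysicalPlan cached = CustomCacheManager.getQueryPlan(sql);
  //     if (cached != null) {
  //       return cached;
  //     }
  //   }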

  public boolean isParquetRowGroupFilterPushdownPlanningEnabled() {
    return options.getOption(PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING);
  }
exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
@@ -18,6 +18,9 @@
package org.apache.drill.exec.planner.sql;

import java.io.IOException;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.apache.calcite.sql.SqlDescribeSchema;
import org.apache.calcite.sql.SqlKind;
@@ -29,12 +32,14 @@
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.tools.RelConversionException;
import org.apache.calcite.tools.ValidationException;
import org.apache.calcite.util.Litmus;
import org.apache.drill.common.exceptions.UserException;
import org.apache.drill.exec.ExecConstants;
import org.apache.drill.exec.exception.MetadataException;
import org.apache.drill.exec.ops.QueryContext;
import org.apache.drill.exec.ops.QueryContext.SqlStatementType;
import org.apache.drill.exec.physical.PhysicalPlan;
import org.apache.drill.exec.planner.sql.conversion.SqlConverter;
import org.apache.drill.exec.planner.sql.handlers.AbstractSqlHandler;
import org.apache.drill.exec.planner.sql.handlers.AnalyzeTableHandler;
import org.apache.drill.exec.planner.sql.handlers.DefaultSqlHandler;
Expand All @@ -52,7 +57,6 @@
import org.apache.drill.exec.planner.sql.parser.DrillSqlDescribeTable;
import org.apache.drill.exec.planner.sql.parser.DrillSqlResetOption;
import org.apache.drill.exec.planner.sql.parser.SqlSchema;
import org.apache.drill.exec.proto.UserBitShared.DrillPBError;
import org.apache.drill.exec.testing.ControlsInjector;
import org.apache.drill.exec.testing.ControlsInjectorFactory;
@@ -128,6 +132,8 @@ private static PhysicalPlan convertPlan(QueryContext context, String sql, Pointe
    try {
      return getPhysicalPlan(context, sql, textPlan, retryAttempts);
    } catch (Exception e) {
      logger.debug("Conversion into physical plan failed, retry attempt: {}", retryAttempts);
      logger.trace("There was an error during conversion into physical plan.", e);

      // It is prohibited to retry query planning for ANALYZE statement since it changes
@@ -176,9 +182,11 @@ private static PhysicalPlan convertPlan(QueryContext context, String sql, Pointe
  private static PhysicalPlan getPhysicalPlan(QueryContext context, String sql, Pointer<String> textPlan,
      long retryAttempts) throws ForemanSetupException, RelConversionException, IOException, ValidationException {
    try {
      return getQueryPlan(context, sql, textPlan);
    } catch (Exception e) {
      Throwable rootCause = Throwables.getRootCause(e);
      logger.debug("Physical plan conversion failed, root cause: {}", rootCause.getMessage());
      // Calcite wraps exceptions thrown during planning, so checks whether the original exception is a MetadataException
      if (rootCause instanceof MetadataException) {
        // resets SqlStatementType to avoid errors when it is set during further attempts
@@ -216,12 +224,21 @@ private static PhysicalPlan getPhysicalPlan(QueryContext context, String sql, Po
   * @param textPlan text plan
   * @return query physical plan
   */
  private static PhysicalPlan getQueryPlan(QueryContext context, String sql, Pointer<String> textPlan)
      throws ForemanSetupException, RelConversionException, IOException, ValidationException {

    final SqlConverter parser = new SqlConverter(context);
    injector.injectChecked(context.getExecutionControls(), "sql-parsing", ForemanSetupException.class);
    final SqlNode sqlNode = checkAndApplyAutoLimit(parser, context, sql);

    // Key the cache on the parsed SQL tree rather than the raw text, so queries
    // differing only in formatting share an entry. A single get() is used
    // instead of containsKey()/get() to avoid a check-then-act race.
    QueryPlanCacheKey queryPlanCacheKey = new QueryPlanCacheKey(sqlNode);
    PhysicalPlan cachedPlan = queryPlanCache.get(queryPlanCacheKey);
    if (cachedPlan != null) {
      logger.debug("Returning physical plan from query plan cache");
      return cachedPlan;
    }

    final AbstractSqlHandler handler;
    final SqlHandlerConfig config = new SqlHandlerConfig(context, parser);

@@ -287,6 +304,8 @@ private static PhysicalPlan getQueryPlan(QueryContext context, String sql, Point
      context.setSQLStatementType(SqlStatementType.OTHER);
    }

    // Determines whether result set should be returned for the query based on return result set option and sql node kind.
    // Overrides the option on a query level if it differs from the current value.
    boolean currentReturnResultValue = context.getOptions().getBoolean(ExecConstants.RETURN_RESULT_SET_FOR_DDL);
@@ -295,7 +314,34 @@ private static PhysicalPlan getQueryPlan(QueryContext context, String sql, Point
      context.getOptions().setLocalOption(ExecConstants.RETURN_RESULT_SET_FOR_DDL, true);
    }

    PhysicalPlan physicalPlan = handler.getPlan(sqlNode);
    queryPlanCache.put(queryPlanCacheKey, physicalPlan);
    return physicalPlan;
  }

  // Process-wide cache of generated physical plans, keyed by the parsed SQL
  // tree. Note: this map is unbounded, unlike the Caffeine-based
  // CustomCacheManager introduced elsewhere in this PR.
  private static final ConcurrentMap<QueryPlanCacheKey, PhysicalPlan> queryPlanCache =
      new ConcurrentHashMap<>();

  private static class QueryPlanCacheKey {
    private final SqlNode sqlNode;

    public QueryPlanCacheKey(SqlNode sqlNode) {
      this.sqlNode = sqlNode;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      QueryPlanCacheKey cacheKey = (QueryPlanCacheKey) o;
      return sqlNode.equalsDeep(cacheKey.sqlNode, Litmus.IGNORE);
    }

    @Override
    public int hashCode() {
      // Objects.hash(sqlNode) would use SqlNode's identity hash code, which is
      // inconsistent with the equalsDeep()-based equals() above: two deeply
      // equal trees would land in different buckets and the cache would never
      // hit. Hash the unparsed SQL text instead, which equal trees share.
      return sqlNode.toString().hashCode();
    }
  }
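
  // Equality sketch (illustration; parse() is a hypothetical helper): two
  // parses of the same SQL yield distinct SqlNode instances but equal keys.
  //   SqlNode a = parse("SELECT 1");
  //   SqlNode b = parse("SELECT 1");
  //   new QueryPlanCacheKey(a).equals(new QueryPlanCacheKey(b));  // true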

  private static boolean isAutoLimitShouldBeApplied(SqlNode sqlNode, int queryMaxRows) {