DRILL-6834: Introduce option to disable result set for DDL queries for JDBC connection

- Added session-scoped option `exec.return_result_set_for_ddl` to control whether an update count or a result set is returned to the JDBC client. The option defaults to `true`, which keeps the existing behaviour of returning a result set;
- Updated the Drill JDBC layer (`DrillCursor` and `DrillStatement`) to implement the new behaviour. A client-side usage sketch follows below.
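
On the client side, the new behaviour looks roughly like the following. This is a minimal sketch, not code from this commit: it assumes a Drillbit reachable at `localhost`, the Drill JDBC driver on the classpath, and a writable `dfs.tmp` workspace; the table name `example_t` is made up.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DdlUpdateCountExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:drill:drillbit=localhost");
         Statement stmt = conn.createStatement()) {
      // Opt the session out of result sets for DDL (the default is true).
      stmt.execute("ALTER SESSION SET `exec.return_result_set_for_ddl` = false");

      // execute() returns false when the statement produces no result set;
      // the affected-rows count is then available via getUpdateCount().
      boolean hasResultSet = stmt.execute(
          "CREATE TABLE dfs.tmp.`example_t` AS SELECT * FROM cp.`employee.json`");
      if (!hasResultSet) {
        System.out.println("Affected rows: " + stmt.getUpdateCount());
      }
    }
  }
}
```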

closes apache#1549
KazydubB authored and xiangt920 committed Dec 26, 2019
1 parent ff2693d commit 00b1b60
Showing 27 changed files with 812 additions and 276 deletions.
310 changes: 197 additions & 113 deletions contrib/native/client/src/protobuf/UserBitShared.pb.cc

Large diffs are not rendered by default.

68 changes: 66 additions & 2 deletions contrib/native/client/src/protobuf/UserBitShared.pb.h

Some generated files are not rendered by default.

exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java

@@ -876,5 +876,10 @@ public static String bootDefaultFor(String name) {

public static final String LIST_FILES_RECURSIVELY = "storage.list_files_recursively";
public static final BooleanValidator LIST_FILES_RECURSIVELY_VALIDATOR = new BooleanValidator(LIST_FILES_RECURSIVELY,
new OptionDescription("Enables recursive files listing when querying the `INFORMATION_SCHEMA.FILES` table or executing the SHOW FILES command. Default is false. (Drill 1.15+"));
new OptionDescription("Enables recursive files listing when querying the `INFORMATION_SCHEMA.FILES` table or executing the SHOW FILES command. Default is false. (Drill 1.15+)"));

+  public static final String RETURN_RESULT_SET_FOR_DDL = "exec.return_result_set_for_ddl";
+  public static final BooleanValidator RETURN_RESULT_SET_FOR_DDL_VALIDATOR = new BooleanValidator(RETURN_RESULT_SET_FOR_DDL,
+      new OptionDescription("Controls whether to return result set for CREATE TABLE/VIEW, DROP TABLE/VIEW, SET, USE etc. queries. " +
+          "If set to false affected rows count will be returned instead and result set will be null. Default is true. (Drill 1.15+)"));
}

exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/materialize/VectorRecordMaterializer.java

@@ -17,6 +17,7 @@
*/
package org.apache.drill.exec.physical.impl.materialize;

+import org.apache.drill.exec.ExecConstants;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.ops.FragmentContext;
import org.apache.drill.exec.ops.OperatorContext;
@@ -25,35 +26,35 @@
import org.apache.drill.exec.record.BatchSchema;
import org.apache.drill.exec.record.RecordBatch;
import org.apache.drill.exec.record.WritableBatch;
+import org.apache.drill.exec.server.options.OptionManager;

-public class VectorRecordMaterializer implements RecordMaterializer{
+public class VectorRecordMaterializer implements RecordMaterializer {
static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(VectorRecordMaterializer.class);

private QueryId queryId;
private RecordBatch batch;
private BufferAllocator allocator;
+  private OptionManager options;

public VectorRecordMaterializer(FragmentContext context, OperatorContext oContext, RecordBatch batch) {
this.queryId = context.getHandle().getQueryId();
this.batch = batch;
this.allocator = oContext.getAllocator();
BatchSchema schema = batch.getSchema();
assert schema != null : "Schema must be defined.";

-    // for (MaterializedField f : batch.getSchema()) {
-    //   logger.debug("New Field: {}", f);
-    // }
+    options = context.getOptions();
}

public QueryWritableBatch convertNext() {
//batch.getWritableBatch().getDef().getRecordCount()
WritableBatch w = batch.getWritableBatch().transfer(allocator);

-    QueryData header = QueryData.newBuilder() //
-        .setQueryId(queryId) //
-        .setRowCount(batch.getRecordCount()) //
-        .setDef(w.getDef()).build();
-    QueryWritableBatch batch = new QueryWritableBatch(header, w.getBuffers());
-    return batch;
+    QueryData.Builder builder = QueryData.newBuilder()
+        .setQueryId(queryId)
+        .setRowCount(batch.getRecordCount())
+        .setDef(w.getDef());
+    if (!options.getBoolean(ExecConstants.RETURN_RESULT_SET_FOR_DDL)) {
+      int count = w.getDef().getAffectedRowsCount();
+      builder.setAffectedRowsCount(count == -1 ? 0 : count);
+    }
+    return new QueryWritableBatch(builder.build(), w.getBuffers());
}
}

exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java

@@ -20,10 +20,12 @@
import java.io.IOException;

import org.apache.calcite.sql.SqlDescribeSchema;
+import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.tools.RelConversionException;
import org.apache.calcite.tools.ValidationException;
import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.ExecConstants;
import org.apache.drill.exec.ops.QueryContext;
import org.apache.drill.exec.physical.PhysicalPlan;
import org.apache.drill.exec.planner.sql.handlers.AbstractSqlHandler;
@@ -35,7 +37,6 @@
import org.apache.drill.exec.planner.sql.handlers.SqlHandlerConfig;
import org.apache.drill.exec.planner.sql.parser.DrillSqlCall;
import org.apache.drill.exec.planner.sql.parser.DrillSqlDescribeTable;
-import org.apache.drill.exec.planner.sql.parser.SqlCreateTable;
import org.apache.drill.exec.testing.ControlsInjector;
import org.apache.drill.exec.testing.ControlsInjectorFactory;
import org.apache.drill.exec.util.Pointer;
@@ -110,7 +111,7 @@ private static PhysicalPlan getQueryPlan(QueryContext context, String sql, Point
final AbstractSqlHandler handler;
final SqlHandlerConfig config = new SqlHandlerConfig(context, parser);

-    switch(sqlNode.getKind()){
+    switch(sqlNode.getKind()) {
case EXPLAIN:
handler = new ExplainHandler(config, textPlan);
break;
@@ -127,21 +128,29 @@
handler = new DescribeSchemaHandler(config);
break;
}
+      case CREATE_TABLE:
+        handler = ((DrillSqlCall) sqlNode).getSqlHandler(config, textPlan);
+        break;
+      case DROP_TABLE:
+      case CREATE_VIEW:
+      case DROP_VIEW:
+      case OTHER_DDL:
case OTHER:
-        if(sqlNode instanceof SqlCreateTable) {
-          handler = ((DrillSqlCall)sqlNode).getSqlHandler(config, textPlan);
-          break;
-        }

if (sqlNode instanceof DrillSqlCall) {
-          handler = ((DrillSqlCall)sqlNode).getSqlHandler(config);
+          handler = ((DrillSqlCall) sqlNode).getSqlHandler(config);
break;
}
// fallthrough
default:
handler = new DefaultSqlHandler(config, textPlan);
}

+    boolean returnResultSet = context.getOptions().getBoolean(ExecConstants.RETURN_RESULT_SET_FOR_DDL);
+    // Determine whether a result set should be returned for the query, based on `exec.return_result_set_for_ddl`
+    // and the SQL node kind. Overrides the option at the query level.
+    context.getOptions().setLocalOption(ExecConstants.RETURN_RESULT_SET_FOR_DDL,
+        returnResultSet || !SqlKind.DDL.contains(sqlNode.getKind()));

try {
return handler.getPlan(sqlNode);
} catch(ValidationException e) {
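
The override above is worth spelling out: non-DDL statements (SELECT, SHOW, EXPLAIN, and so on) always return a result set, while DDL statements honour the session option. Below is a minimal sketch of the same predicate, assuming Calcite's `SqlKind` on the classpath; the helper name is made up.

```java
import org.apache.calcite.sql.SqlKind;

public class ReturnResultSetCheck {
  // Mirrors the setLocalOption(...) call above:
  // effective value = sessionOption || !SqlKind.DDL.contains(kind).
  static boolean effectiveReturnResultSet(boolean sessionOption, SqlKind kind) {
    return sessionOption || !SqlKind.DDL.contains(kind);
  }

  public static void main(String[] args) {
    System.out.println(effectiveReturnResultSet(false, SqlKind.SELECT));       // true: not DDL
    System.out.println(effectiveReturnResultSet(false, SqlKind.CREATE_TABLE)); // false: DDL, option off
    System.out.println(effectiveReturnResultSet(true, SqlKind.DROP_VIEW));     // true: option on
  }
}
```

This is also why the parser classes below move from `SqlKind.OTHER` to the dedicated DDL kinds (`CREATE_TABLE`, `DROP_TABLE`, `OTHER_DDL`, and so on): `SqlKind.DDL` matches only those kinds, so the reclassification is what lets the per-query override fire for them.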

exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateFunction.java

@@ -36,7 +36,7 @@ public class SqlCreateFunction extends DrillSqlCall {

private final SqlNode jar;

public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_FUNCTION", SqlKind.OTHER) {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_FUNCTION", SqlKind.OTHER_DDL) {
@Override
public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
return new SqlCreateFunction(pos, operands[0]);

exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateTable.java

@@ -41,7 +41,7 @@
import org.apache.drill.exec.util.Pointer;

public class SqlCreateTable extends DrillSqlCall {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_TABLE", SqlKind.OTHER) {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_TABLE", SqlKind.CREATE_TABLE) {
@Override
public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
Preconditions.checkArgument(operands.length == 6, "SqlCreateTable.createCall() has to get 6 operands!");

exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateView.java

@@ -37,7 +37,7 @@
import java.util.List;

public class SqlCreateView extends DrillSqlCall {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_VIEW", SqlKind.OTHER) {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_VIEW", SqlKind.CREATE_VIEW) {
@Override
public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
return new SqlCreateView(pos, (SqlIdentifier) operands[0], (SqlNodeList) operands[1], operands[2], (SqlLiteral) operands[3]);

exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropFunction.java

@@ -36,7 +36,7 @@ public class SqlDropFunction extends DrillSqlCall {

private final SqlNode jar;

public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_FUNCTION", SqlKind.OTHER) {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_FUNCTION", SqlKind.OTHER_DDL) {
@Override
public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
return new SqlDropFunction(pos, operands[0]);

exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropTable.java

@@ -35,7 +35,7 @@
import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;

public class SqlDropTable extends DrillSqlCall {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_TABLE", SqlKind.OTHER) {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_TABLE", SqlKind.DROP_TABLE) {
@Override
public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
return new SqlDropTable(pos, (SqlIdentifier) operands[0], (SqlLiteral) operands[1]);

exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropView.java

@@ -35,7 +35,7 @@
import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;

public class SqlDropView extends DrillSqlCall {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_VIEW", SqlKind.OTHER) {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_VIEW", SqlKind.DROP_VIEW) {
@Override
public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
return new SqlDropView(pos, (SqlIdentifier) operands[0], (SqlLiteral) operands[1]);

exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlRefreshMetadata.java

@@ -40,7 +40,7 @@
* REFRESH TABLE METADATA tblname
*/
public class SqlRefreshMetadata extends DrillSqlCall {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("REFRESH_TABLE_METADATA", SqlKind.OTHER) {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("REFRESH_TABLE_METADATA", SqlKind.OTHER_DDL) {
@Override
public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
return new SqlRefreshMetadata(pos, (SqlIdentifier) operands[0]);

exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlUseSchema.java

@@ -38,8 +38,7 @@
*/
public class SqlUseSchema extends DrillSqlCall {

-  public static final SqlSpecialOperator OPERATOR =
-      new SqlSpecialOperator("USE_SCHEMA", SqlKind.OTHER){
+  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("USE_SCHEMA", SqlKind.OTHER_DDL) {
@Override
public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
return new SqlUseSchema(pos, (SqlIdentifier) operands[0]);

exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionList.java

@@ -18,10 +18,18 @@
package org.apache.drill.exec.server.options;

import java.util.ArrayList;
+import java.util.Collection;

@SuppressWarnings("serial")
public class OptionList extends ArrayList<OptionValue>{

+  public OptionList() {
+  }
+
+  public OptionList(Collection<OptionValue> options) {
+    super(options);
+  }

public void merge(OptionList list){
this.addAll(list);
}

exec/java-exec/src/main/java/org/apache/drill/exec/server/options/QueryOptionManager.java

@@ -19,6 +19,9 @@

import org.apache.drill.common.map.CaseInsensitiveMap;

+import java.util.HashMap;
+import java.util.Map;

/**
* {@link OptionManager} that holds options within {@link org.apache.drill.exec.ops.QueryContext}.
*/
@@ -31,9 +34,14 @@ public QueryOptionManager(OptionManager sessionOptions) {

@Override
public OptionList getOptionList() {
-    OptionList list = super.getOptionList();
-    list.merge(fallback.getOptionList());
-    return list;
+    Map<String, OptionValue> optionMap = new HashMap<>();
+    for (OptionValue option : fallback.getOptionList()) {
+      optionMap.put(option.name, option);
+    }
+    for (OptionValue option : super.getOptionList()) {
+      optionMap.put(option.name, option);
+    }
+    return new OptionList(optionMap.values());
}

@Override
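
A quick sketch of why the map-based rewrite above matters. The old `list.merge(fallback.getOptionList())` was a plain `addAll`, so an option set at both session and query scope appeared twice in the list; with a map, the query-level value is put last and shadows the fallback. Illustrative values only, not Drill API:

```java
import java.util.HashMap;
import java.util.Map;

public class OptionMergeSketch {
  public static void main(String[] args) {
    Map<String, String> merged = new HashMap<>();
    merged.put("exec.return_result_set_for_ddl", "true");   // fallback (session/system) value first
    merged.put("exec.return_result_set_for_ddl", "false");  // query-level value overwrites it
    System.out.println(merged.size()); // 1: a single, query-scoped entry survives
  }
}
```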

exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java

@@ -267,7 +267,8 @@ public static CaseInsensitiveMap<OptionDefinition> createDefaultOptionDefinition
new OptionDefinition(ExecConstants.OUTPUT_BATCH_SIZE_AVAIL_MEM_FACTOR_VALIDATOR, new OptionMetaData(OptionValue.AccessibleScopes.SYSTEM, true, false)),
new OptionDefinition(ExecConstants.FRAG_RUNNER_RPC_TIMEOUT_VALIDATOR, new OptionMetaData(OptionValue.AccessibleScopes.SYSTEM, true, true)),
new OptionDefinition(ExecConstants.LIST_FILES_RECURSIVELY_VALIDATOR),
-        new OptionDefinition(ExecConstants.QUERY_ROWKEYJOIN_BATCHSIZE)
+        new OptionDefinition(ExecConstants.QUERY_ROWKEYJOIN_BATCHSIZE),
+        new OptionDefinition(ExecConstants.RETURN_RESULT_SET_FOR_DDL_VALIDATOR)
};

CaseInsensitiveMap<OptionDefinition> map = Arrays.stream(definitions)
