Skip to content

Commit

Permalink
Add Session limitation (#2354)
Browse files Browse the repository at this point in the history
* add session creation limitation

Signed-off-by: Peng Huo <penghuo@gmail.com>

* add doc

Signed-off-by: Peng Huo <penghuo@gmail.com>

---------

Signed-off-by: Peng Huo <penghuo@gmail.com>
(cherry picked from commit a2014ee)
Signed-off-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
  • Loading branch information
github-actions[bot] committed Oct 25, 2023
1 parent 9a40591 commit 41bd845
Show file tree
Hide file tree
Showing 8 changed files with 180 additions and 1 deletion.
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,8 @@ public enum Key {
METRICS_ROLLING_INTERVAL("plugins.query.metrics.rolling_interval"),
SPARK_EXECUTION_ENGINE_CONFIG("plugins.query.executionengine.spark.config"),
CLUSTER_NAME("cluster.name"),
SPARK_EXECUTION_SESSION_ENABLED("plugins.query.executionengine.spark.session.enabled");
SPARK_EXECUTION_SESSION_ENABLED("plugins.query.executionengine.spark.session.enabled"),
SPARK_EXECUTION_SESSION_LIMIT("plugins.query.executionengine.spark.session.limit");

@Getter private final String keyValue;

Expand Down
36 changes: 36 additions & 0 deletions docs/user/admin/settings.rst
Original file line number Diff line number Diff line change
Expand Up @@ -347,3 +347,39 @@ SQL query::
}
}

plugins.query.executionengine.spark.session.limit
===================================================

Description
-----------

Each datasource can have a maximum of 100 sessions running in parallel by default. You can increase the limit with this setting.

1. The default value is 100.
2. This setting is node scope.
3. This setting can be updated dynamically.

You can update the setting with a new value like this.

SQL query::

sh$ curl -sS -H 'Content-Type: application/json' -X PUT localhost:9200/_plugins/_query/settings \
... -d '{"transient":{"plugins.query.executionengine.spark.session.limit":200}}'
{
"acknowledged": true,
"persistent": {},
"transient": {
"plugins": {
"query": {
"executionengine": {
"spark": {
"session": {
"limit": "200"
}
}
}
}
}
}
}

Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,13 @@ public class OpenSearchSettings extends Settings {
Setting.Property.NodeScope,
Setting.Property.Dynamic);

/**
 * Maximum number of concurrently active interactive Spark sessions allowed per datasource.
 * Backed by the dynamic, node-scoped cluster setting
 * {@code plugins.query.executionengine.spark.session.limit}; defaults to 100.
 */
public static final Setting<?> SPARK_EXECUTION_SESSION_LIMIT_SETTING =
    Setting.intSetting(
        Key.SPARK_EXECUTION_SESSION_LIMIT.getKeyValue(),
        100,
        Setting.Property.NodeScope,
        Setting.Property.Dynamic);

/** Construct OpenSearchSetting. The OpenSearchSetting must be singleton. */
@SuppressWarnings("unchecked")
public OpenSearchSettings(ClusterSettings clusterSettings) {
Expand Down Expand Up @@ -218,6 +225,12 @@ public OpenSearchSettings(ClusterSettings clusterSettings) {
Key.SPARK_EXECUTION_SESSION_ENABLED,
SPARK_EXECUTION_SESSION_ENABLED_SETTING,
new Updater(Key.SPARK_EXECUTION_SESSION_ENABLED));
register(
settingBuilder,
clusterSettings,
Key.SPARK_EXECUTION_SESSION_LIMIT,
SPARK_EXECUTION_SESSION_LIMIT_SETTING,
new Updater(Key.SPARK_EXECUTION_SESSION_LIMIT));
registerNonDynamicSettings(
settingBuilder, clusterSettings, Key.CLUSTER_NAME, ClusterName.CLUSTER_NAME_SETTING);
defaultSettings = settingBuilder.build();
Expand Down Expand Up @@ -284,6 +297,7 @@ public static List<Setting<?>> pluginSettings() {
.add(DATASOURCE_URI_HOSTS_DENY_LIST)
.add(SPARK_EXECUTION_ENGINE_CONFIG)
.add(SPARK_EXECUTION_SESSION_ENABLED_SETTING)
.add(SPARK_EXECUTION_SESSION_LIMIT_SETTING)
.build();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,11 @@
package org.opensearch.sql.spark.execution.session;

import static org.opensearch.sql.common.setting.Settings.Key.SPARK_EXECUTION_SESSION_ENABLED;
import static org.opensearch.sql.common.setting.Settings.Key.SPARK_EXECUTION_SESSION_LIMIT;
import static org.opensearch.sql.spark.execution.session.SessionId.newSessionId;
import static org.opensearch.sql.spark.execution.statestore.StateStore.activeSessionsCount;

import java.util.Locale;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
import org.opensearch.sql.common.setting.Settings;
Expand All @@ -26,6 +29,15 @@ public class SessionManager {
private final Settings settings;

public Session createSession(CreateSessionRequest request) {
int sessionMaxLimit = sessionMaxLimit();
if (activeSessionsCount(stateStore, request.getDatasourceName()).get() >= sessionMaxLimit) {
String errorMsg =
String.format(
Locale.ROOT,
"The maximum number of active sessions can be " + "supported is %d",
sessionMaxLimit);
throw new IllegalArgumentException(errorMsg);
}
InteractiveSession session =
InteractiveSession.builder()
.sessionId(newSessionId(request.getDatasourceName()))
Expand Down Expand Up @@ -55,4 +67,8 @@ public Optional<Session> getSession(SessionId sid) {
/** Returns whether interactive Spark session execution is enabled via cluster settings. */
public boolean isEnabled() {
  Boolean sessionEnabled = settings.getSettingValue(SPARK_EXECUTION_SESSION_ENABLED);
  return sessionEnabled;
}

/** Returns the maximum number of active sessions allowed per datasource (dynamic setting). */
public int sessionMaxLimit() {
  Integer limit = settings.getSettingValue(SPARK_EXECUTION_SESSION_LIMIT);
  return limit;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
import java.util.Optional;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Supplier;
import lombok.RequiredArgsConstructor;
import org.apache.commons.io.IOUtils;
import org.apache.logging.log4j.LogManager;
Expand All @@ -25,6 +26,8 @@
import org.opensearch.action.get.GetResponse;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.index.IndexResponse;
import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.action.support.WriteRequest;
import org.opensearch.action.update.UpdateRequest;
import org.opensearch.action.update.UpdateResponse;
Expand All @@ -38,9 +41,13 @@
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.core.xcontent.XContentParser;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;
import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
import org.opensearch.sql.spark.execution.session.SessionModel;
import org.opensearch.sql.spark.execution.session.SessionState;
import org.opensearch.sql.spark.execution.session.SessionType;
import org.opensearch.sql.spark.execution.statement.StatementModel;
import org.opensearch.sql.spark.execution.statement.StatementState;

Expand Down Expand Up @@ -182,6 +189,35 @@ private void createIndex(String indexName) {
}
}

private long count(String indexName, QueryBuilder query) {
if (!this.clusterService.state().routingTable().hasIndex(indexName)) {
return 0;
}
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.query(query);
searchSourceBuilder.size(0);

// https://github.com/opensearch-project/sql/issues/1801.
SearchRequest searchRequest =
new SearchRequest()
.indices(indexName)
.preference("_primary_first")
.source(searchSourceBuilder);

ActionFuture<SearchResponse> searchResponseActionFuture;
try (ThreadContext.StoredContext ignored =
client.threadPool().getThreadContext().stashContext()) {
searchResponseActionFuture = client.search(searchRequest);
}
SearchResponse searchResponse = searchResponseActionFuture.actionGet();
if (searchResponse.status().getStatus() != 200) {
throw new RuntimeException(
"Fetching job metadata information failed with status : " + searchResponse.status());

Check warning on line 215 in spark/src/main/java/org/opensearch/sql/spark/execution/statestore/StateStore.java

View check run for this annotation

Codecov / codecov/patch

spark/src/main/java/org/opensearch/sql/spark/execution/statestore/StateStore.java#L214-L215

Added lines #L214 - L215 were not covered by tests
} else {
return searchResponse.getHits().getTotalHits().value;
}
}

private String loadConfigFromResource(String fileName) throws IOException {
InputStream fileStream = StateStore.class.getClassLoader().getResourceAsStream(fileName);
return IOUtils.toString(fileStream, StandardCharsets.UTF_8);
Expand Down Expand Up @@ -253,4 +289,19 @@ public static Function<String, Optional<AsyncQueryJobMetadata>> getJobMetaData(
AsyncQueryJobMetadata::fromXContent,
DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName));
}

/**
 * Builds a lazy counter of RUNNING interactive sessions for {@code datasourceName}.
 *
 * <p>The count query is executed each time the returned supplier is invoked, so callers always
 * observe the current state-store contents.
 */
public static Supplier<Long> activeSessionsCount(StateStore stateStore, String datasourceName) {
  return () -> {
    QueryBuilder activeInteractiveSessions =
        QueryBuilders.boolQuery()
            .must(QueryBuilders.termQuery(SessionModel.TYPE, SessionModel.SESSION_DOC_TYPE))
            .must(
                QueryBuilders.termQuery(
                    SessionModel.SESSION_TYPE, SessionType.INTERACTIVE.getSessionType()))
            .must(QueryBuilders.termQuery(SessionModel.DATASOURCE_NAME, datasourceName))
            .must(
                QueryBuilders.termQuery(
                    SessionModel.SESSION_STATE, SessionState.RUNNING.getSessionState()));
    return stateStore.count(
        DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName), activeInteractiveSessions);
  };
}
}
2 changes: 2 additions & 0 deletions spark/src/main/resources/query_execution_request_mapping.yml
Original file line number Diff line number Diff line change
Expand Up @@ -40,3 +40,5 @@ properties:
format: strict_date_time||epoch_millis
queryId:
type: keyword
excludeJobIds:
type: keyword
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
package org.opensearch.sql.spark.asyncquery;

import static org.opensearch.sql.opensearch.setting.OpenSearchSettings.SPARK_EXECUTION_SESSION_ENABLED_SETTING;
import static org.opensearch.sql.opensearch.setting.OpenSearchSettings.SPARK_EXECUTION_SESSION_LIMIT_SETTING;
import static org.opensearch.sql.spark.data.constants.SparkConstants.DEFAULT_CLASS_NAME;
import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_JOB_REQUEST_INDEX;
import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_JOB_SESSION_ID;
Expand All @@ -16,7 +17,9 @@
import static org.opensearch.sql.spark.execution.statement.StatementModel.SESSION_ID;
import static org.opensearch.sql.spark.execution.statement.StatementModel.STATEMENT_DOC_TYPE;
import static org.opensearch.sql.spark.execution.statestore.StateStore.DATASOURCE_TO_REQUEST_INDEX;
import static org.opensearch.sql.spark.execution.statestore.StateStore.getSession;
import static org.opensearch.sql.spark.execution.statestore.StateStore.getStatement;
import static org.opensearch.sql.spark.execution.statestore.StateStore.updateSessionState;
import static org.opensearch.sql.spark.execution.statestore.StateStore.updateStatementState;

import com.amazonaws.services.emrserverless.model.CancelJobRunResult;
Expand Down Expand Up @@ -61,6 +64,8 @@
import org.opensearch.sql.spark.config.SparkExecutionEngineConfig;
import org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher;
import org.opensearch.sql.spark.execution.session.SessionManager;
import org.opensearch.sql.spark.execution.session.SessionModel;
import org.opensearch.sql.spark.execution.session.SessionState;
import org.opensearch.sql.spark.execution.statement.StatementModel;
import org.opensearch.sql.spark.execution.statement.StatementState;
import org.opensearch.sql.spark.execution.statestore.StateStore;
Expand Down Expand Up @@ -129,6 +134,13 @@ public void clean() {
.setTransientSettings(
Settings.builder().putNull(SPARK_EXECUTION_SESSION_ENABLED_SETTING.getKey()).build())
.get();
client
.admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(
Settings.builder().putNull(SPARK_EXECUTION_SESSION_LIMIT_SETTING.getKey()).build())
.get();
}

@Test
Expand Down Expand Up @@ -378,6 +390,35 @@ public void withSessionCreateAsyncQueryFailed() {
assertEquals("mock error", asyncQueryResults.getError());
}

@Test
public void createSessionMoreThanLimitFailed() {
  LocalEMRSClient emrsClient = new LocalEMRSClient();
  AsyncQueryExecutorService asyncQueryExecutorService =
      createAsyncQueryExecutorService(emrsClient);

  // enable session
  enableSession(true);
  // only allow one active session per datasource.
  setSessionLimit(1);

  // 1. first async query consumes the single allowed session slot.
  CreateAsyncQueryResponse first =
      asyncQueryExecutorService.createAsyncQuery(
          new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null));
  assertNotNull(first.getSessionId());
  setSessionState(first.getSessionId(), SessionState.RUNNING);

  // 2. second async query exceeds the session limit and must be rejected.
  IllegalArgumentException exception =
      assertThrows(
          IllegalArgumentException.class,
          () ->
              asyncQueryExecutorService.createAsyncQuery(
                  new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)));
  assertEquals(
      "The maximum number of active sessions can be supported is 1", exception.getMessage());
}

private DataSourceServiceImpl createDataSourceService() {
String masterKey = "a57d991d9b573f75b9bba1df";
DataSourceMetadataStorage dataSourceMetadataStorage =
Expand Down Expand Up @@ -470,6 +511,16 @@ public void enableSession(boolean enabled) {
.get();
}

/** Sets the spark session limit as a transient cluster setting for this test. */
public void setSessionLimit(long limit) {
  Settings transientSettings =
      Settings.builder().put(SPARK_EXECUTION_SESSION_LIMIT_SETTING.getKey(), limit).build();
  client.admin().cluster().prepareUpdateSettings().setTransientSettings(transientSettings).get();
}

int search(QueryBuilder query) {
SearchRequest searchRequest = new SearchRequest();
searchRequest.indices(DATASOURCE_TO_REQUEST_INDEX.apply(DATASOURCE));
Expand All @@ -480,4 +531,11 @@ int search(QueryBuilder query) {

return searchResponse.getHits().getHits().length;
}

/**
 * Transitions the stored session identified by {@code sessionId} to {@code sessionState} and
 * verifies the persisted model reflects the requested state.
 *
 * <p>Fix: the assertion previously compared against hard-coded {@code SessionState.RUNNING},
 * so the helper only verified correctly when callers happened to request RUNNING; it now
 * asserts against the state actually requested.
 */
void setSessionState(String sessionId, SessionState sessionState) {
  Optional<SessionModel> model = getSession(stateStore, DATASOURCE).apply(sessionId);
  SessionModel updated =
      updateSessionState(stateStore, DATASOURCE).apply(model.get(), sessionState);
  assertEquals(sessionState, updated.getSessionState());
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ public void sessionEnable() {
/**
 * Builds test settings carrying the session-enabled flag and the default session limit (100).
 */
public static Settings sessionSetting(boolean enabled) {
  Map<Settings.Key, Object> config = new HashMap<>();
  config.put(Settings.Key.SPARK_EXECUTION_SESSION_ENABLED, enabled);
  config.put(Settings.Key.SPARK_EXECUTION_SESSION_LIMIT, 100);
  return settings(config);
}

Expand Down

0 comments on commit 41bd845

Please sign in to comment.