Skip to content

Commit

Permalink
Prevent index level setting from being configured on a node level
Browse files Browse the repository at this point in the history
Today we allow all kinds of index-level settings to be configured on the node level, which
is error prone and difficult to get right in a consistent manner.
For instance if some analyzers are setup in a yaml config file some nodes might
not have these analyzers and then index creation fails.

Nevertheless, this change allows some selected settings to be specified on a node level
for instance:
 * `index.codec` which is used in a hot/cold node architecture and its value is really per node or per index
 * `index.store.fs.fs_lock` which is also dependent on the filesystem a node uses

All other index level settings must be specified on the index level. For existing clusters the index must be closed
and all settings must be updated via the API on each of the indices.

Closes #16799
  • Loading branch information
s1monw committed Mar 17, 2016
1 parent 6441852 commit e91a141
Show file tree
Hide file tree
Showing 36 changed files with 275 additions and 297 deletions.
2 changes: 0 additions & 2 deletions buildSrc/src/main/resources/checkstyle_suppressions.xml
Expand Up @@ -460,7 +460,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]codec[/\\]PerFieldMappingPostingFormatCodec.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]ElasticsearchConcurrentMergeScheduler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]Engine.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]EngineConfig.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngine.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]LiveVersionMap.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]ShadowEngine.java" checks="LineLength" />
Expand Down Expand Up @@ -613,7 +612,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]similarity[/\\]SimilarityService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]snapshots[/\\]blobstore[/\\]BlobStoreIndexShardRepository.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]snapshots[/\\]blobstore[/\\]BlobStoreIndexShardSnapshots.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]FsDirectoryService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStore.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStoreConfig.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]LegacyVerification.java" checks="LineLength" />
Expand Down
Expand Up @@ -58,9 +58,8 @@ protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet,
if (setting.getProperties().contains(scope) == false) {
throw new IllegalArgumentException("Setting must be a " + scope + " setting but has: " + setting.getProperties());
}
if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) {
throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]");
}
validateSettingKey(setting);

if (setting.hasComplexMatcher()) {
Setting<?> overlappingSetting = findOverlappingSetting(setting, complexMatchers);
if (overlappingSetting != null) {
Expand All @@ -76,6 +75,12 @@ protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet,
this.keySettings = Collections.unmodifiableMap(keySettings);
}

protected void validateSettingKey(Setting setting) {
if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) {
throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]");
}
}

protected AbstractScopedSettings(Settings nodeSettings, Settings scopeSettings, AbstractScopedSettings other) {
super(nodeSettings);
this.lastSettingsApplied = scopeSettings;
Expand Down
Expand Up @@ -163,6 +163,14 @@ public IndexScopedSettings copy(Settings settings, IndexMetaData metaData) {
return new IndexScopedSettings(settings, this, metaData);
}

@Override
protected void validateSettingKey(Setting setting) {
if (setting.getKey().startsWith("index.") == false) {
throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "] must start with [index.]");
}
super.validateSettingKey(setting);
}

public boolean isPrivateSetting(String key) {
switch (key) {
case IndexMetaData.SETTING_CREATION_DATE:
Expand Down
Expand Up @@ -761,6 +761,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
return builder;
}

/**
* Returns <tt>true</tt> if this settings object contains no settings
* @return <tt>true</tt> if this settings object contains no settings
*/
public boolean isEmpty() {
return this.settings.isEmpty();
}

/**
* A builder allowing to put different settings and then {@link #build()} an immutable
* settings implementation. Use {@link Settings#settingsBuilder()} in order to
Expand Down
Expand Up @@ -54,8 +54,7 @@ protected void configure() {
final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values()));
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()));
// by now we are fully configured, lets check node level settings for unregistered index settings
indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE));
final Predicate<String> acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.or(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).negate();
final Predicate<String> acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.negate();
clusterSettings.validate(settings.filter(acceptOnlyClusterSettings));
validateTribeSettings(settings, clusterSettings);
bind(Settings.class).toInstance(settings);
Expand All @@ -76,21 +75,19 @@ public void registerSetting(Setting<?> setting) {
registerSettingsFilter(setting.getKey());
}
}

// We validate scope settings. We should have one and only one scope.
if (setting.hasNodeScope() && setting.hasIndexScope()) {
throw new IllegalArgumentException("More than one scope has been added to the setting [" + setting.getKey() + "]");
}
if (setting.hasNodeScope()) {
if (nodeSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
if (setting.hasNodeScope() || setting.hasIndexScope()) {
if (setting.hasNodeScope()) {
if (nodeSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
nodeSettings.put(setting.getKey(), setting);
}
nodeSettings.put(setting.getKey(), setting);
} else if (setting.hasIndexScope()) {
if (indexSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
if (setting.hasIndexScope()) {
if (indexSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
indexSettings.put(setting.getKey(), setting);
}
indexSettings.put(setting.getKey(), setting);
} else {
throw new IllegalArgumentException("No scope found for setting [" + setting.getKey() + "]");
}
Expand Down
19 changes: 16 additions & 3 deletions core/src/main/java/org/elasticsearch/index/IndexModule.java
Expand Up @@ -67,12 +67,13 @@
public final class IndexModule {

public static final Setting<String> INDEX_STORE_TYPE_SETTING =
new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope);
new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope);
public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity";
public static final String INDEX_QUERY_CACHE = "index";
public static final String NONE_QUERY_CACHE = "none";
public static final Setting<String> INDEX_QUERY_CACHE_TYPE_SETTING =
new Setting<>("index.queries.cache.type", INDEX_QUERY_CACHE, Function.identity(), Property.IndexScope);

// for test purposes only
public static final Setting<Boolean> INDEX_QUERY_CACHE_EVERYTHING_SETTING =
Setting.boolSetting("index.queries.cache.everything", false, Property.IndexScope);
Expand All @@ -87,7 +88,7 @@ public final class IndexModule {
private final Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities = new HashMap<>();
private final Map<String, BiFunction<IndexSettings, IndexStoreConfig, IndexStore>> storeTypes = new HashMap<>();
private final Map<String, BiFunction<IndexSettings, IndicesQueryCache, QueryCache>> queryCaches = new HashMap<>();

private final SetOnce<String> forceQueryCacheType = new SetOnce<>();

public IndexModule(IndexSettings indexSettings, IndexStoreConfig indexStoreConfig, AnalysisRegistry analysisRegistry) {
this.indexStoreConfig = indexStoreConfig;
Expand Down Expand Up @@ -265,11 +266,23 @@ public IndexService newIndexService(NodeEnvironment environment, IndexService.Sh
}
indexSettings.getScopedSettings().addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING, store::setType);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, store::setMaxRate);
final String queryCacheType = indexSettings.getValue(INDEX_QUERY_CACHE_TYPE_SETTING);
final String queryCacheType = forceQueryCacheType.get() != null ? forceQueryCacheType.get() : indexSettings.getValue(INDEX_QUERY_CACHE_TYPE_SETTING);
final BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider = queryCaches.get(queryCacheType);
final QueryCache queryCache = queryCacheProvider.apply(indexSettings, indicesQueryCache);
return new IndexService(indexSettings, environment, new SimilarityService(indexSettings, similarities), shardStoreDeleter, analysisRegistry, engineFactory.get(),
servicesProvider, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, indicesFieldDataCache, listeners);
}

/**
* Forces a certain query cache type. If this is set
* the given cache type is overriding the default as well as the type
* set on the index level.
* NOTE: this can only be set once
*
* @see #INDEX_QUERY_CACHE_TYPE_SETTING
*/
public void forceQueryCacheType(String type) {
this.forceQueryCacheType.set(type);
}

}
Expand Up @@ -85,6 +85,10 @@ public AnalysisRegistry(HunspellService hunspellService, Environment environment
this.analyzers = Collections.unmodifiableMap(analyzerBuilder);
}

public HunspellService getHunspellService() {
return hunspellService;
}

/**
* Returns a registered {@link TokenizerFactory} provider by name or <code>null</code> if the tokenizer was not registered
*/
Expand Down
34 changes: 22 additions & 12 deletions core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
Expand Up @@ -40,6 +40,8 @@
import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.function.Function;

/*
* Holds all the configuration that is used to create an {@link Engine}.
* Once {@link Engine} has been created with this object, changes to this
Expand Down Expand Up @@ -69,20 +71,23 @@ public final class EngineConfig {
/**
* Index setting to change the low level lucene codec used for writing new segments.
* This setting is <b>not</b> realtime updateable.
* This setting is also settable on the node and the index level, it's commonly used in hot/cold node archs where index is likely
* allocated on both `kind` of nodes.
*/
public static final Setting<String> INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", (s) -> {
switch(s) {
public static final Setting<String> INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", s -> {
switch (s) {
case "default":
case "best_compression":
case "lucene_default":
return s;
default:
if (Codec.availableCodecs().contains(s) == false) { // we don't error message the not officially supported ones
throw new IllegalArgumentException("unknown value for [index.codec] must be one of [default, best_compression] but was: " + s);
throw new IllegalArgumentException(
"unknown value for [index.codec] must be one of [default, best_compression] but was: " + s);
}
return s;
}
}, Property.IndexScope);
}, Property.IndexScope, Property.NodeScope);

/** if set to true the engine will start even if the translog id in the commit point can not be found */
public static final String INDEX_FORCE_NEW_TRANSLOG = "index.engine.force_new_translog";
Expand All @@ -97,7 +102,8 @@ public EngineConfig(ShardId shardId, ThreadPool threadPool,
IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
MergePolicy mergePolicy,Analyzer analyzer,
Similarity similarity, CodecService codecService, Engine.EventListener eventListener,
TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, TranslogConfig translogConfig, TimeValue flushMergesAfter) {
TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy,
TranslogConfig translogConfig, TimeValue flushMergesAfter) {
this.shardId = shardId;
final Settings settings = indexSettings.getSettings();
this.indexSettings = indexSettings;
Expand Down Expand Up @@ -138,19 +144,21 @@ public void setEnableGcDeletes(boolean enableGcDeletes) {
}

/**
* Returns the initial index buffer size. This setting is only read on startup and otherwise controlled by {@link IndexingMemoryController}
* Returns the initial index buffer size. This setting is only read on startup and otherwise controlled
* by {@link IndexingMemoryController}
*/
public ByteSizeValue getIndexingBufferSize() {
return indexingBufferSize;
}

/**
* Returns <code>true</code> iff delete garbage collection in the engine should be enabled. This setting is updateable
* in realtime and forces a volatile read. Consumers can safely read this value directly go fetch it's latest value. The default is <code>true</code>
* in realtime and forces a volatile read. Consumers can safely read this value directly go fetch it's latest value.
* The default is <code>true</code>
* <p>
* Engine GC deletion if enabled collects deleted documents from in-memory realtime data structures after a certain amount of
* time ({@link IndexSettings#getGcDeletesInMillis()} if enabled. Before deletes are GCed they will cause re-adding the document that was deleted
* to fail.
* time ({@link IndexSettings#getGcDeletesInMillis()} if enabled. Before deletes are GCed they will cause re-adding the document
* that was deleted to fail.
* </p>
*/
public boolean isEnableGcDeletes() {
Expand All @@ -168,7 +176,8 @@ public Codec getCodec() {
}

/**
* Returns a thread-pool mainly used to get estimated time stamps from {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule
* Returns a thread-pool mainly used to get estimated time stamps from
* {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule
* async force merge calls on the {@link org.elasticsearch.threadpool.ThreadPool.Names#FORCE_MERGE} thread-pool
*/
public ThreadPool getThreadPool() {
Expand All @@ -183,8 +192,9 @@ public Engine.Warmer getWarmer() {
}

/**
* Returns the {@link org.elasticsearch.index.store.Store} instance that provides access to the {@link org.apache.lucene.store.Directory}
* used for the engines {@link org.apache.lucene.index.IndexWriter} to write it's index files to.
* Returns the {@link org.elasticsearch.index.store.Store} instance that provides access to the
* {@link org.apache.lucene.store.Directory} used for the engines {@link org.apache.lucene.index.IndexWriter} to write it's index files
* to.
* <p>
* Note: In order to use this instance the consumer needs to increment the stores reference before it's used the first time and hold
* it's reference until it's not needed anymore.
Expand Down
Expand Up @@ -61,8 +61,9 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim
return SimpleFSLockFactory.INSTANCE;
default:
throw new IllegalArgumentException("unrecognized [index.store.fs.fs_lock] \"" + s + "\": must be native or simple");
}
}, Property.IndexScope);
} // can we set on both - node and index level, some nodes might be running on NFS so they might need simple rather than native
}, Property.IndexScope, Property.NodeScope);

private final CounterMetric rateLimitingTimeInNanos = new CounterMetric();
private final ShardPath path;

Expand Down Expand Up @@ -108,7 +109,8 @@ public void onPause(long nanos) {


protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException {
final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.DEFAULT.getSettingsKey());
final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(),
IndexModule.Type.DEFAULT.getSettingsKey());
if (IndexModule.Type.FS.match(storeType) || IndexModule.Type.DEFAULT.match(storeType)) {
final FSDirectory open = FSDirectory.open(location, lockFactory); // use lucene defaults
if (open instanceof MMapDirectory && Constants.WINDOWS == false) {
Expand Down
Expand Up @@ -160,15 +160,21 @@ public void registerHunspellDictionary(String name, Dictionary dictionary) {
@Override
protected void configure() {
try {
HunspellService service = new HunspellService(environment.settings(), environment, knownDictionaries);
AnalysisRegistry registry = new AnalysisRegistry(service, environment, charFilters, tokenFilters, tokenizers, analyzers);
bind(HunspellService.class).toInstance(service);
AnalysisRegistry registry = buildRegistry();
bind(HunspellService.class).toInstance(registry.getHunspellService());
bind(AnalysisRegistry.class).toInstance(registry);
} catch (IOException e) {
throw new ElasticsearchException("failed to load hunspell service", e);
}
}

/**
* Builds an {@link AnalysisRegistry} from the current configuration.
*/
public AnalysisRegistry buildRegistry() throws IOException {
return new AnalysisRegistry(new HunspellService(environment.settings(), environment, knownDictionaries), environment, charFilters, tokenFilters, tokenizers, analyzers);
}

/**
* AnalysisProvider is the basic factory interface for registering analysis components like:
* <ul>
Expand Down
Expand Up @@ -99,7 +99,6 @@ public void testCommitStats() throws Exception {
assertThat(commitStats.getId(), notNullValue());
assertThat(commitStats.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY));
assertThat(commitStats.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY));

}
}

Expand Down

0 comments on commit e91a141

Please sign in to comment.