diff --git a/dev-tools/scripts/SOLR-2452.patch.hack.pl b/dev-tools/scripts/SOLR-2452.patch.hack.pl index 7da5c543c4b6..9f0d3394960e 100755 --- a/dev-tools/scripts/SOLR-2452.patch.hack.pl +++ b/dev-tools/scripts/SOLR-2452.patch.hack.pl @@ -167,8 +167,8 @@ 'solr/core/src/test-files/solr/conf/schema-replication1.xml' => 'solr/solrj/src/test-files/solrj/solr/conf/schema-replication1.xml', - 'solr/core/src/test-files/solr/conf/solrconfig-slave1.xml' - => 'solr/solrj/src/test-files/solrj/solr/conf/solrconfig-slave1.xml', + 'solr/core/src/test-files/solr/conf/solrconfig-secondary1.xml' + => 'solr/solrj/src/test-files/solrj/solr/conf/solrconfig-secondary1.xml', ); my $diff; diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index b46ccfea8860..978baf1dc827 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -21,6 +21,7 @@ New Features * SOLR-14588: Introduce Circuit Breaker Infrastructure and a JVM heap usage memory tracking circuit breaker implementation (Atri Sharma) +* SOLR-14702: Pull oppressive language out the project (marcussorealheis via Kevin Risden) Improvements ---------------------- diff --git a/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-solrconfig.xml b/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-solrconfig.xml index f9f53045840b..85304ebe3dc8 100644 --- a/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-solrconfig.xml +++ b/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-solrconfig.xml @@ -162,7 +162,7 @@ + 1-2 for read-only secondaries, higher for primaries w/o cache warming. 
--> 4 diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/contentstream-solrconfig.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/contentstream-solrconfig.xml index d3ee34c0bc81..fcb6d896e1ba 100644 --- a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/contentstream-solrconfig.xml +++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/contentstream-solrconfig.xml @@ -162,7 +162,7 @@ + 1-2 for read-only secondaries, higher for primaries w/o cache warming. --> 4 diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-nodatasource-solrconfig.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-nodatasource-solrconfig.xml index 2fd15b950987..03781f92e625 100644 --- a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-nodatasource-solrconfig.xml +++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-nodatasource-solrconfig.xml @@ -164,7 +164,7 @@ + 1-2 for read-only secondaries, higher for primaries w/o cache warming. --> 4 diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig.xml index ec6e6a9e8393..ace92264d990 100644 --- a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig.xml +++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig.xml @@ -162,7 +162,7 @@ + 1-2 for read-only secondaries, higher for primaries w/o cache warming. 
--> 4 diff --git a/solr/contrib/dataimporthandler/src/test-files/solr/configsets/dihconfigset/conf/solrconfig.xml b/solr/contrib/dataimporthandler/src/test-files/solr/configsets/dihconfigset/conf/solrconfig.xml index ec6e6a9e8393..ace92264d990 100644 --- a/solr/contrib/dataimporthandler/src/test-files/solr/configsets/dihconfigset/conf/solrconfig.xml +++ b/solr/contrib/dataimporthandler/src/test-files/solr/configsets/dihconfigset/conf/solrconfig.xml @@ -162,7 +162,7 @@ + 1-2 for read-only secondaries, higher for primaries w/o cache warming. --> 4 diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java index 34e50e00c7e3..a6bd5ef6d628 100644 --- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java +++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java @@ -120,7 +120,7 @@ private static class SolrInstance { File dataDir; /** - * if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is + * if primaryPort is null, this instance is a primary -- otherwise this instance is a secondary, and assumes the primary is * on localhost at the specified port. 
*/ public SolrInstance(String name, Integer port) { diff --git a/solr/contrib/prometheus-exporter/conf/solr-exporter-config.xml b/solr/contrib/prometheus-exporter/conf/solr-exporter-config.xml index b043835ec19f..feef851e1b85 100644 --- a/solr/contrib/prometheus-exporter/conf/solr-exporter-config.xml +++ b/solr/contrib/prometheus-exporter/conf/solr-exporter-config.xml @@ -985,7 +985,7 @@ (if $parent_key_item_len == 5 then $parent_key_items[3] else "" end) as $shard | (if $parent_key_item_len == 5 then $parent_key_items[4] else "" end) as $replica | (if $parent_key_item_len == 5 then ($collection + "_" + $shard + "_" + $replica) else $core end) as $core | - $parent.value | to_entries | .[] | select(.key == "REPLICATION./replication.isMaster") as $object | + $parent.value | to_entries | .[] | select(.key == "REPLICATION./replication.isPrimary") as $object | $object.key | split(".")[0] as $category | $object.key | split(".")[1] as $handler | (if $object.value == true then 1.0 else 0.0 end) as $value | @@ -1018,13 +1018,13 @@ (if $parent_key_item_len == 5 then $parent_key_items[3] else "" end) as $shard | (if $parent_key_item_len == 5 then $parent_key_items[4] else "" end) as $replica | (if $parent_key_item_len == 5 then ($collection + "_" + $shard + "_" + $replica) else $core end) as $core | - $parent.value | to_entries | .[] | select(.key == "REPLICATION./replication.isSlave") as $object | + $parent.value | to_entries | .[] | select(.key == "REPLICATION./replication.isSecondary") as $object | $object.key | split(".")[0] as $category | $object.key | split(".")[1] as $handler | (if $object.value == true then 1.0 else 0.0 end) as $value | if $parent_key_item_len == 3 then { - name: "solr_metrics_core_replication_slave", + name: "solr_metrics_core_replication_secondary", type: "GAUGE", help: "See following URL: https://lucene.apache.org/solr/guide/metrics-reporting.html", label_names: ["category", "handler", "core"], @@ -1033,7 +1033,7 @@ } else { - name: 
"solr_metrics_core_replication_slave", + name: "solr_metrics_core_replication_secondary", type: "GAUGE", help: "See following URL: https://lucene.apache.org/solr/guide/metrics-reporting.html", label_names: ["category", "handler", "core", "collection", "shard", "replica"], diff --git a/solr/contrib/prometheus-exporter/src/test-files/conf/prometheus-solr-exporter-integration-test-config.xml b/solr/contrib/prometheus-exporter/src/test-files/conf/prometheus-solr-exporter-integration-test-config.xml index 6b306c99e929..1df6c94a86d1 100644 --- a/solr/contrib/prometheus-exporter/src/test-files/conf/prometheus-solr-exporter-integration-test-config.xml +++ b/solr/contrib/prometheus-exporter/src/test-files/conf/prometheus-solr-exporter-integration-test-config.xml @@ -989,7 +989,7 @@ (if $parent_key_item_len == 5 then $parent_key_items[3] else "" end) as $shard | (if $parent_key_item_len == 5 then $parent_key_items[4] else "" end) as $replica | (if $parent_key_item_len == 5 then ($collection + "_" + $shard + "_" + $replica) else $core end) as $core | - $parent.value | to_entries | .[] | select(.key == "REPLICATION./replication.isMaster") as $object | + $parent.value | to_entries | .[] | select(.key == "REPLICATION./replication.isPrimary") as $object | $object.key | split(".")[0] as $category | $object.key | split(".")[1] as $handler | (if $object.value == true then 1.0 else 0.0 end) as $value | @@ -1022,13 +1022,13 @@ (if $parent_key_item_len == 5 then $parent_key_items[3] else "" end) as $shard | (if $parent_key_item_len == 5 then $parent_key_items[4] else "" end) as $replica | (if $parent_key_item_len == 5 then ($collection + "_" + $shard + "_" + $replica) else $core end) as $core | - $parent.value | to_entries | .[] | select(.key == "REPLICATION./replication.isSlave") as $object | + $parent.value | to_entries | .[] | select(.key == "REPLICATION./replication.isSecondary") as $object | $object.key | split(".")[0] as $category | $object.key | split(".")[1] as $handler | (if 
$object.value == true then 1.0 else 0.0 end) as $value | if $parent_key_item_len == 3 then { - name: "solr_metrics_core_replication_slave", + name: "solr_metrics_core_replication_secondary", type: "GAUGE", help: "See following URL: https://lucene.apache.org/solr/guide/metrics-reporting.html", label_names: ["category", "handler", "core"], @@ -1037,7 +1037,7 @@ } else { - name: "solr_metrics_core_replication_slave", + name: "solr_metrics_core_replication_secondary", type: "GAUGE", help: "See following URL: https://lucene.apache.org/solr/guide/metrics-reporting.html", label_names: ["category", "handler", "core", "collection", "shard", "replica"], diff --git a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java index 2be35fbcbd2c..40c60825493a 100644 --- a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java +++ b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java @@ -239,7 +239,7 @@ final private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderp } ModifiableSolrParams solrParams = new ModifiableSolrParams(); - solrParams.set(ReplicationHandler.MASTER_URL, leaderUrl); + solrParams.set(ReplicationHandler.PRIMARY_URL, leaderUrl); solrParams.set(ReplicationHandler.SKIP_COMMIT_ON_MASTER_VERSION_ZERO, replicaType == Replica.Type.TLOG); // always download the tlogs from the leader when running with cdcr enabled. 
We need to have all the tlogs // to ensure leader failover doesn't cause missing docs on the target diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java b/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java index 17a6ec38b975..549df21ecad9 100644 --- a/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java +++ b/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java @@ -76,12 +76,12 @@ public void startReplication(boolean switchTransactionLog) throws InterruptedExc } log.info("Will start replication from leader with poll interval: {}", pollIntervalStr ); - NamedList slaveConfig = new NamedList<>(); - slaveConfig.add("fetchFromLeader", Boolean.TRUE); - slaveConfig.add(ReplicationHandler.SKIP_COMMIT_ON_MASTER_VERSION_ZERO, switchTransactionLog); - slaveConfig.add("pollInterval", pollIntervalStr); + NamedList secondaryConfig = new NamedList<>(); + secondaryConfig.add("fetchFromLeader", Boolean.TRUE); + secondaryConfig.add(ReplicationHandler.SKIP_COMMIT_ON_MASTER_VERSION_ZERO, switchTransactionLog); + secondaryConfig.add("pollInterval", pollIntervalStr); NamedList replicationConfig = new NamedList<>(); - replicationConfig.add("slave", slaveConfig); + replicationConfig.add("secondary", secondaryConfig); String lastCommitVersion = getCommitVersion(core); if (lastCommitVersion != null) { diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java index 1f9d1f94cb3f..86f30d2f2f8e 100644 --- a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java +++ b/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java @@ -369,7 +369,7 @@ private BootstrapStatus sendBootstrapCommand() throws InterruptedException { log.info("Attempting to bootstrap target collection: {} shard: {} leader: {}", targetCollection, shard, leaderCoreUrl); try { @SuppressWarnings({"rawtypes"}) - NamedList response = 
sendCdcrCommand(client, CdcrParams.CdcrAction.BOOTSTRAP, ReplicationHandler.MASTER_URL, myCoreUrl); + NamedList response = sendCdcrCommand(client, CdcrParams.CdcrAction.BOOTSTRAP, ReplicationHandler.PRIMARY_URL, myCoreUrl); log.debug("CDCR Bootstrap response: {}", response); String status = response.get(RESPONSE_STATUS).toString(); return BootstrapStatus.valueOf(status.toUpperCase(Locale.ROOT)); diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java index 8e77a84092e3..5816aaa70d57 100644 --- a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java @@ -652,8 +652,8 @@ private void handleBootstrapAction(SolrQueryRequest req, SolrQueryResponse rsp) coreState.setCdcrBootstrapRunning(true); latch.countDown(); // free the latch as current bootstrap is executing //running.set(true); - String masterUrl = req.getParams().get(ReplicationHandler.MASTER_URL); - BootstrapCallable bootstrapCallable = new BootstrapCallable(masterUrl, core); + String primaryUrl = req.getParams().get(ReplicationHandler.PRIMARY_URL); + BootstrapCallable bootstrapCallable = new BootstrapCallable(primaryUrl, core); coreState.setCdcrBootstrapCallable(bootstrapCallable); Future bootstrapFuture = core.getCoreContainer().getUpdateShardHandler().getRecoveryExecutor() .submit(bootstrapCallable); @@ -733,12 +733,12 @@ private void handleBootstrapStatus(SolrQueryRequest req, SolrQueryResponse rsp) } static class BootstrapCallable implements Callable, Closeable { - private final String masterUrl; + private final String primaryUrl; private final SolrCore core; private volatile boolean closed = false; - BootstrapCallable(String masterUrl, SolrCore core) { - this.masterUrl = masterUrl; + BootstrapCallable(String primaryUrl, SolrCore core) { + this.primaryUrl = primaryUrl; this.core = core; } @@ -762,7 +762,7 @@ public Boolean call() 
throws Exception { // to receive any updates from the source during bootstrap ulog.bufferUpdates(); try { - commitOnLeader(masterUrl); + commitOnLeader(primaryUrl); // use rep handler directly, so we can do this sync rather than async SolrRequestHandler handler = core.getRequestHandler(ReplicationHandler.PATH); ReplicationHandler replicationHandler = (ReplicationHandler) handler; @@ -773,7 +773,7 @@ public Boolean call() throws Exception { } ModifiableSolrParams solrParams = new ModifiableSolrParams(); - solrParams.set(ReplicationHandler.MASTER_URL, masterUrl); + solrParams.set(ReplicationHandler.PRIMARY_URL, primaryUrl); // we do not want the raw tlog files from the source solrParams.set(ReplicationHandler.TLOG_FILES, false); diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java index e78028ef2932..86588687239d 100644 --- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java +++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java @@ -112,7 +112,7 @@ /** *

Provides functionality of downloading changed index files as well as config files and a timer for scheduling fetches from the - * master.

+ * primary.

* * * @since solr 1.4 @@ -124,7 +124,7 @@ public class IndexFetcher { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - private String masterUrl; + private String primaryUrl; final ReplicationHandler replicationHandler; @@ -169,7 +169,7 @@ public class IndexFetcher { private boolean downloadTlogFiles = false; - private boolean skipCommitOnMasterVersionZero = true; + private boolean skipCommitOnPrimaryVersionZero = true; private boolean clearLocalIndexFirst = false; @@ -236,19 +236,19 @@ public IndexFetcher(@SuppressWarnings({"rawtypes"})final NamedList initArgs, fin if (fetchFromLeader != null && fetchFromLeader instanceof Boolean) { this.fetchFromLeader = (boolean) fetchFromLeader; } - Object skipCommitOnMasterVersionZero = initArgs.get(SKIP_COMMIT_ON_MASTER_VERSION_ZERO); - if (skipCommitOnMasterVersionZero != null && skipCommitOnMasterVersionZero instanceof Boolean) { - this.skipCommitOnMasterVersionZero = (boolean) skipCommitOnMasterVersionZero; + Object skipCommitOnPrimaryVersionZero = initArgs.get(SKIP_COMMIT_ON_MASTER_VERSION_ZERO); + if (skipCommitOnPrimaryVersionZero != null && skipCommitOnPrimaryVersionZero instanceof Boolean) { + this.skipCommitOnPrimaryVersionZero = (boolean) skipCommitOnPrimaryVersionZero; } - String masterUrl = (String) initArgs.get(MASTER_URL); - if (masterUrl == null && !this.fetchFromLeader) + String primaryUrl = (String) initArgs.get(PRIMARY_URL); + if (primaryUrl == null && !this.fetchFromLeader) throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, - "'masterUrl' is required for a slave"); - if (masterUrl != null && masterUrl.endsWith(ReplicationHandler.PATH)) { - masterUrl = masterUrl.substring(0, masterUrl.length()-12); - log.warn("'masterUrl' must be specified without the {} suffix", ReplicationHandler.PATH); + "'primaryUrl' is required for a secondary"); + if (primaryUrl != null && primaryUrl.endsWith(ReplicationHandler.PATH)) { + primaryUrl = primaryUrl.substring(0, 
primaryUrl.length()-12); + log.warn("'primaryUrl' must be specified without the {} suffix", ReplicationHandler.PATH); } - this.masterUrl = masterUrl; + this.primaryUrl = primaryUrl; this.replicationHandler = handler; String compress = (String) initArgs.get(COMPRESSION); @@ -256,7 +256,7 @@ public IndexFetcher(@SuppressWarnings({"rawtypes"})final NamedList initArgs, fin useExternalCompression = EXTERNAL.equals(compress); connTimeout = getParameter(initArgs, HttpClientUtil.PROP_CONNECTION_TIMEOUT, 30000, null); - // allow a master override for tests - you specify this in /replication slave section of solrconfig and some + // allow a primary override for tests - you specify this in /replication secondary section of solrconfig and some // test don't want to define this soTimeout = Integer.getInteger("solr.indexfetcher.sotimeout", -1); if (soTimeout == -1) { @@ -284,7 +284,7 @@ protected T getParameter(@SuppressWarnings({"rawtypes"})NamedList initArgs, } /** - * Gets the latest commit version and generation from the master + * Gets the latest commit version and generation from the primary */ @SuppressWarnings({"unchecked", "rawtypes"}) NamedList getLatestVersion() throws IOException { @@ -295,7 +295,7 @@ NamedList getLatestVersion() throws IOException { QueryRequest req = new QueryRequest(params); // TODO modify to use shardhandler - try (HttpSolrClient client = new Builder(masterUrl) + try (HttpSolrClient client = new Builder(primaryUrl) .withHttpClient(myHttpClient) .withConnectionTimeout(connTimeout) .withSocketTimeout(soTimeout) @@ -321,7 +321,7 @@ private void fetchFileList(long gen) throws IOException { QueryRequest req = new QueryRequest(params); // TODO modify to use shardhandler - try (HttpSolrClient client = new HttpSolrClient.Builder(masterUrl) + try (HttpSolrClient client = new HttpSolrClient.Builder(primaryUrl) .withHttpClient(myHttpClient) .withConnectionTimeout(connTimeout) .withSocketTimeout(soTimeout) @@ -355,12 +355,12 @@ IndexFetchResult 
fetchLatestIndex(boolean forceReplication) throws IOException, } /** - * This command downloads all the necessary files from master to install a index commit point. Only changed files are + * This command downloads all the necessary files from primary to install a index commit point. Only changed files are * downloaded. It also downloads the conf files (if they are modified). * * @param forceReplication force a replication in all cases * @param forceCoreReload force a core reload in all cases - * @return true on success, false if slave is already in sync + * @return true on success, false if secondary is already in sync * @throws IOException if an exception occurs */ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreReload) throws IOException, InterruptedException { @@ -404,15 +404,15 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel } return IndexFetchResult.LEADER_IS_NOT_ACTIVE; } - if (!replica.getCoreUrl().equals(masterUrl)) { - masterUrl = replica.getCoreUrl(); - log.info("Updated masterUrl to {}", masterUrl); + if (!replica.getCoreUrl().equals(primaryUrl)) { + primaryUrl = replica.getCoreUrl(); + log.info("Updated primaryUrl to {}", primaryUrl); // TODO: Do we need to set forceReplication = true? } else { - log.debug("masterUrl didn't change"); + log.debug("primaryUrl didn't change"); } } - //get the current 'replicateable' index version in the master + //get the current 'replicateable' index version in the primary @SuppressWarnings({"rawtypes"}) NamedList response; try { @@ -420,10 +420,10 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel } catch (Exception e) { final String errorMsg = e.toString(); if (!Strings.isNullOrEmpty(errorMsg) && errorMsg.contains(INTERRUPT_RESPONSE_MESSAGE)) { - log.warn("Master at: {} is not available. Index fetch failed by interrupt. Exception: {}", masterUrl, errorMsg); + log.warn("Primary at: {} is not available. 
Index fetch failed by interrupt. Exception: {}", primaryUrl, errorMsg); return new IndexFetchResult(IndexFetchResult.FAILED_BY_INTERRUPT_MESSAGE, false, e); } else { - log.warn("Master at: {} is not available. Index fetch failed by exception: {}", masterUrl, errorMsg); + log.warn("Primary at: {} is not available. Index fetch failed by exception: {}", primaryUrl, errorMsg); return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e); } } @@ -431,8 +431,8 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel long latestVersion = (Long) response.get(CMD_INDEX_VERSION); long latestGeneration = (Long) response.get(GENERATION); - log.info("Master's generation: {}", latestGeneration); - log.info("Master's version: {}", latestVersion); + log.info("Primary's generation: {}", latestGeneration); + log.info("Primary's version: {}", latestVersion); // TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes) IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit(); @@ -453,23 +453,23 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel } if (log.isInfoEnabled()) { - log.info("Slave's generation: {}", commit.getGeneration()); - log.info("Slave's version: {}", IndexDeletionPolicyWrapper.getCommitTimestamp(commit)); // logOK + log.info("Secondary's generation: {}", commit.getGeneration()); + log.info("Secondary's version: {}", IndexDeletionPolicyWrapper.getCommitTimestamp(commit)); // logOK } if (latestVersion == 0L) { if (commit.getGeneration() != 0) { // since we won't get the files for an empty index, // we just clear ours and commit - log.info("New index in Master. Deleting mine..."); + log.info("New index in Primary. 
Deleting mine..."); RefCounted iw = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(solrCore); try { iw.get().deleteAll(); } finally { iw.decref(); } - assert TestInjection.injectDelayBeforeSlaveCommitRefresh(); - if (skipCommitOnMasterVersionZero) { + assert TestInjection.injectDelayBeforeSecondaryCommitRefresh(); + if (skipCommitOnPrimaryVersionZero) { openNewSearcherAndUpdateCommitPoint(); } else { SolrQueryRequest req = new LocalSolrQueryRequest(solrCore, new ModifiableSolrParams()); @@ -479,14 +479,14 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel //there is nothing to be replicated successfulInstall = true; - log.debug("Nothing to replicate, master's version is 0"); + log.debug("Nothing to replicate, primary's version is 0"); return IndexFetchResult.MASTER_VERSION_ZERO; } // TODO: Should we be comparing timestamps (across machines) here? if (!forceReplication && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) { - //master and slave are already in sync just return - log.info("Slave in sync with master."); + //primary and secondary are already in sync just return + log.info("Secondary in sync with primary."); successfulInstall = true; return IndexFetchResult.ALREADY_IN_SYNC; } @@ -498,11 +498,11 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel return IndexFetchResult.PEER_INDEX_COMMIT_DELETED; } if (log.isInfoEnabled()) { - log.info("Number of files in latest index in master: {}", filesToDownload.size()); + log.info("Number of files in latest index in primary: {}", filesToDownload.size()); } if (tlogFilesToDownload != null) { if (log.isInfoEnabled()) { - log.info("Number of tlog files in master: {}", tlogFilesToDownload.size()); + log.info("Number of tlog files in primary: {}", tlogFilesToDownload.size()); } } @@ -510,7 +510,7 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel fsyncService = 
ExecutorUtil.newMDCAwareSingleThreadExecutor(new SolrNamedThreadFactory("fsyncService")); // use a synchronized list because the list is read by other threads (to show details) filesDownloaded = Collections.synchronizedList(new ArrayList>()); - // if the generation of master is older than that of the slave , it means they are not compatible to be copied + // if the generation of primary is older than that of the secondary , it means they are not compatible to be copied // then a new index directory to be created and all the files need to be copied boolean isFullCopyNeeded = IndexDeletionPolicyWrapper .getCommitTimestamp(commit) >= latestVersion @@ -533,7 +533,7 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel try { - // We will compare all the index files from the master vs the index files on disk to see if there is a mismatch + // We will compare all the index files from the primary vs the index files on disk to see if there is a mismatch // in the metadata. If there is a mismatch for the same index file then we download the entire index // (except when differential copy is applicable) again. if (!isFullCopyNeeded && isIndexStale(indexDir)) { @@ -964,7 +964,7 @@ private void reloadCore() { } private void downloadConfFiles(List> confFilesToDownload, long latestGeneration) throws Exception { - log.info("Starting download of configuration files from master: {}", confFilesToDownload); + log.info("Starting download of configuration files from primary: {}", confFilesToDownload); confFilesDownloaded = Collections.synchronizedList(new ArrayList<>()); File tmpconfDir = new File(solrCore.getResourceLoader().getConfigDir(), "conf." + getDateAsStr(new Date())); try { @@ -993,7 +993,7 @@ private void downloadConfFiles(List> confFilesToDownload, lo * Download all the tlog files to the temp tlog directory. 
*/ private long downloadTlogFiles(File tmpTlogDir, long latestGeneration) throws Exception { - log.info("Starting download of tlog files from master: {}", tlogFilesToDownload); + log.info("Starting download of tlog files from primary: {}", tlogFilesToDownload); tlogFilesDownloaded = Collections.synchronizedList(new ArrayList<>()); long bytesDownloaded = 0; @@ -1146,7 +1146,7 @@ private void deleteFilesInAdvance(Directory indexDir, String indexDirPath, long // after considering the files actually available locally we really don't need to do any delete return; } - log.info("This disk does not have enough space to download the index from leader/master. So cleaning up the local index. " + log.info("This disk does not have enough space to download the index from leader/primary. So cleaning up the local index. " + " This may lead to loss of data/or node if index replication fails in between"); //now we should disable searchers and index writers because this core will not have all the required files this.clearLocalIndexFirst = true; @@ -1247,7 +1247,7 @@ private static boolean slowFileExists(Directory dir, String fileName) throws IOE } /** - * All the files which are common between master and slave must have same size and same checksum else we assume + * All the files which are common between primary and secondary must have same size and same checksum else we assume * they are not compatible (stale). * * @return true if the index stale and we need to download a fresh copy, false otherwise. @@ -1480,11 +1480,11 @@ private String getDateAsStr(Date d) { private final Map confFileInfoCache = new HashMap<>(); /** - * The local conf files are compared with the conf files in the master. If they are same (by checksum) do not copy. + * The local conf files are compared with the conf files in the primary. If they are same (by checksum) do not copy. 
* - * @param confFilesToDownload The list of files obtained from master + * @param confFilesToDownload The list of files obtained from primary * - * @return a list of configuration files which have changed on the master and need to be downloaded. + * @return a list of configuration files which have changed on the primary and need to be downloaded. */ @SuppressWarnings({"unchecked"}) private Collection> getModifiedConfFiles(List> confFilesToDownload) { @@ -1496,7 +1496,7 @@ private Collection> getModifiedConfFiles(List map : confFilesToDownload) { - //if alias is present that is the name the file may have in the slave + //if alias is present that is the name the file may have in the secondary String name = (String) (map.get(ALIAS) == null ? map.get(NAME) : map.get(ALIAS)); nameVsFile.put(name, map); names.add(name, null); @@ -1752,7 +1752,7 @@ private int fetchPackets(FastInputStream fis) throws Exception { } //then read the packet of bytes fis.readFully(buf, 0, packetSize); - //compare the checksum as sent from the master + //compare the checksum as sent from the primary if (includeChecksum) { checksum.reset(); checksum.update(buf, 0, packetSize); @@ -1870,7 +1870,7 @@ private FastInputStream getStream() throws IOException { InputStream is = null; // TODO use shardhandler - try (HttpSolrClient client = new Builder(masterUrl) + try (HttpSolrClient client = new Builder(primaryUrl) .withHttpClient(myHttpClient) .withResponseParser(null) .withConnectionTimeout(connTimeout) @@ -1979,11 +1979,11 @@ private class LocalFsFileFetcher extends FileFetcher { NamedList getDetails() throws IOException, SolrServerException { ModifiableSolrParams params = new ModifiableSolrParams(); params.set(COMMAND, CMD_DETAILS); - params.set("slave", false); + params.set("secondary", false); params.set(CommonParams.QT, ReplicationHandler.PATH); // TODO use shardhandler - try (HttpSolrClient client = new HttpSolrClient.Builder(masterUrl) + try (HttpSolrClient client = new 
HttpSolrClient.Builder(primaryUrl) .withHttpClient(myHttpClient) .withConnectionTimeout(connTimeout) .withSocketTimeout(soTimeout) @@ -1998,8 +1998,8 @@ public void destroy() { HttpClientUtil.close(myHttpClient); } - String getMasterUrl() { - return masterUrl; + String getPrimaryUrl() { + return primaryUrl; } private static final int MAX_RETRIES = 5; diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java index b1b16b0e2413..d890e2bce9e2 100644 --- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java @@ -111,15 +111,15 @@ import static org.apache.solr.common.params.CommonParams.NAME; /** - *

A Handler which provides a REST API for replication and serves replication requests from Slaves.

- *

When running on the master, it provides the following commands

  1. Get the current replicable index version + *

    A Handler which provides a REST API for replication and serves replication requests from Secondaries.

    + *

    When running on the primary, it provides the following commands

    1. Get the current replicable index version * (command=indexversion)
    2. Get the list of files for a given index version * (command=filelist&indexversion=<VERSION>)
    3. Get full or a part (chunk) of a given index or a config * file (command=filecontent&file=<FILE_NAME>) You can optionally specify an offset and length to get that * chunk of the file. You can request a configuration file by using "cf" parameter instead of the "file" parameter.
    4. - *
    5. Get status/statistics (command=details)

    When running on the slave, it provides the following + *

  2. Get status/statistics (command=details)

When running on the secondary, it provides the following * commands

  1. Perform an index fetch now (command=snappull)
  2. Get status/statistics (command=details)
  3. - *
  4. Abort an index fetch (command=abort)
  5. Enable/Disable polling the master for new versions (command=enablepoll + *
  6. Abort an index fetch (command=abort)
  7. Enable/Disable polling the primary for new versions (command=enablepoll * or command=disablepoll)
* * @@ -185,9 +185,9 @@ public String toString() { private NamedList confFileNameAlias = new NamedList<>(); - private boolean isMaster = false; + private boolean isPrimary = false; - private boolean isSlave = false; + private boolean isSecondary = false; private boolean replicateOnOptimize = false; @@ -240,7 +240,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw final SolrParams solrParams = req.getParams(); String command = solrParams.required().get(COMMAND); - // This command does not give the current index version of the master + // This command does not give the current index version of the primary // It gives the current 'replicateable' index version if (command.equals(CMD_INDEX_VERSION)) { IndexCommit commitPoint = indexCommitPoint; // make a copy so it won't change @@ -291,12 +291,12 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw if (abortFetch()) { rsp.add(STATUS, OK_STATUS); } else { - reportErrorOnResponse(rsp, "No slave configured", null); + reportErrorOnResponse(rsp, "No secondary configured", null); } } else if (command.equals(CMD_SHOW_COMMITS)) { populateCommitInfo(rsp); } else if (command.equals(CMD_DETAILS)) { - getReplicationDetails(rsp, solrParams.getBool("slave", true)); + getReplicationDetails(rsp, solrParams.getBool("secondary", true)); } else if (CMD_ENABLE_REPL.equalsIgnoreCase(command)) { replicationEnabled.set(true); rsp.add(STATUS, OK_STATUS); @@ -337,9 +337,9 @@ private void deleteSnapshot(ModifiableSolrParams params, SolrQueryResponse rsp) } private void fetchIndex(SolrParams solrParams, SolrQueryResponse rsp) throws InterruptedException { - String masterUrl = solrParams.get(MASTER_URL); - if (!isSlave && masterUrl == null) { - reportErrorOnResponse(rsp, "No slave configured or no 'masterUrl' specified", null); + String primaryUrl = solrParams.get(PRIMARY_URL); + if (!isSecondary && primaryUrl == null) { + reportErrorOnResponse(rsp, "No secondary configured or no 
'primaryUrl' specified", null); return; } final SolrParams paramsCopy = new ModifiableSolrParams(solrParams); @@ -406,7 +406,7 @@ static Long getCheckSum(Checksum checksum, File f) { private volatile IndexFetcher currentIndexFetcher; public IndexFetchResult doFetch(SolrParams solrParams, boolean forceReplication) { - String masterUrl = solrParams == null ? null : solrParams.get(MASTER_URL); + String primaryUrl = solrParams == null ? null : solrParams.get(PRIMARY_URL); if (!indexFetchLock.tryLock()) return IndexFetchResult.LOCK_OBTAIN_FAILED; if (core.getCoreContainer().isShutDown()) { @@ -414,7 +414,7 @@ public IndexFetchResult doFetch(SolrParams solrParams, boolean forceReplication) return IndexFetchResult.CONTAINER_IS_SHUTTING_DOWN; } try { - if (masterUrl != null) { + if (primaryUrl != null) { if (currentIndexFetcher != null && currentIndexFetcher != pollingIndexFetcher) { currentIndexFetcher.destroy(); } @@ -826,7 +826,7 @@ private void disablePoll(SolrQueryResponse rsp) { log.info("inside disable poll, value of pollDisabled = {}", pollDisabled); rsp.add(STATUS, OK_STATUS); } else { - reportErrorOnResponse(rsp, "No slave configured", null); + reportErrorOnResponse(rsp, "No secondary configured", null); } } @@ -836,7 +836,7 @@ private void enablePoll(SolrQueryResponse rsp) { log.info("inside enable poll, value of pollDisabled = {}", pollDisabled); rsp.add(STATUS, OK_STATUS); } else { - reportErrorOnResponse(rsp, "No slave configured", null); + reportErrorOnResponse(rsp, "No secondary configured", null); } } @@ -871,7 +871,7 @@ public Category getCategory() { @Override public String getDescription() { - return "ReplicationHandler provides replication of index and configuration files from Master to Slaves"; + return "ReplicationHandler provides replication of index and configuration files from Primary to Secondaries"; } /** @@ -897,14 +897,14 @@ public void initializeMetrics(SolrMetricsContext parentContext, String scope) { true, GENERATION, 
getCategory().toString(), scope); solrMetricsContext.gauge(() -> (core != null && !core.isClosed() ? core.getIndexDir() : ""), true, "indexPath", getCategory().toString(), scope); - solrMetricsContext.gauge(() -> isMaster, - true, "isMaster", getCategory().toString(), scope); - solrMetricsContext.gauge(() -> isSlave, - true, "isSlave", getCategory().toString(), scope); + solrMetricsContext.gauge(() -> isPrimary, + true, "isPrimary", getCategory().toString(), scope); + solrMetricsContext.gauge(() -> isSecondary, + true, "isSecondary", getCategory().toString(), scope); final MetricsMap fetcherMap = new MetricsMap((detailed, map) -> { IndexFetcher fetcher = currentIndexFetcher; if (fetcher != null) { - map.put(MASTER_URL, fetcher.getMasterUrl()); + map.put(PRIMARY_URL, fetcher.getPrimaryUrl()); if (getPollInterval() != null) { map.put(POLL_INTERVAL, getPollInterval()); } @@ -930,11 +930,11 @@ public void initializeMetrics(SolrMetricsContext parentContext, String scope) { } }); solrMetricsContext.gauge(fetcherMap, true, "fetcher", getCategory().toString(), scope); - solrMetricsContext.gauge(() -> isMaster && includeConfFiles != null ? includeConfFiles : "", + solrMetricsContext.gauge(() -> isPrimary && includeConfFiles != null ? includeConfFiles : "", true, "confFilesToReplicate", getCategory().toString(), scope); - solrMetricsContext.gauge(() -> isMaster ? getReplicateAfterStrings() : Collections.emptyList(), + solrMetricsContext.gauge(() -> isPrimary ? getReplicateAfterStrings() : Collections.emptyList(), true, REPLICATE_AFTER, getCategory().toString(), scope); - solrMetricsContext.gauge( () -> isMaster && replicationEnabled.get(), + solrMetricsContext.gauge( () -> isPrimary && replicationEnabled.get(), true, "replicationEnabled", getCategory().toString(), scope); } @@ -942,76 +942,76 @@ public void initializeMetrics(SolrMetricsContext parentContext, String scope) { /** * Used for showing statistics and progress information. 
*/ - private NamedList getReplicationDetails(SolrQueryResponse rsp, boolean showSlaveDetails) { + private NamedList getReplicationDetails(SolrQueryResponse rsp, boolean showSecondaryDetails) { NamedList details = new SimpleOrderedMap<>(); - NamedList master = new SimpleOrderedMap<>(); - NamedList slave = new SimpleOrderedMap<>(); + NamedList primary = new SimpleOrderedMap<>(); + NamedList secondary = new SimpleOrderedMap<>(); details.add("indexSize", NumberUtils.readableSize(core.getIndexSize())); details.add("indexPath", core.getIndexDir()); details.add(CMD_SHOW_COMMITS, getCommits()); - details.add("isMaster", String.valueOf(isMaster)); - details.add("isSlave", String.valueOf(isSlave)); + details.add("isPrimary", String.valueOf(isPrimary)); + details.add("isSecondary", String.valueOf(isSecondary)); CommitVersionInfo vInfo = getIndexVersion(); details.add("indexVersion", null == vInfo ? 0 : vInfo.version); details.add(GENERATION, null == vInfo ? 0 : vInfo.generation); IndexCommit commit = indexCommitPoint; // make a copy so it won't change - if (isMaster) { - if (includeConfFiles != null) master.add(CONF_FILES, includeConfFiles); - master.add(REPLICATE_AFTER, getReplicateAfterStrings()); - master.add("replicationEnabled", String.valueOf(replicationEnabled.get())); + if (isPrimary) { + if (includeConfFiles != null) primary.add(CONF_FILES, includeConfFiles); + primary.add(REPLICATE_AFTER, getReplicateAfterStrings()); + primary.add("replicationEnabled", String.valueOf(replicationEnabled.get())); } - if (isMaster && commit != null) { + if (isPrimary && commit != null) { CommitVersionInfo repCommitInfo = CommitVersionInfo.build(commit); - master.add("replicableVersion", repCommitInfo.version); - master.add("replicableGeneration", repCommitInfo.generation); + primary.add("replicableVersion", repCommitInfo.version); + primary.add("replicableGeneration", repCommitInfo.generation); } IndexFetcher fetcher = currentIndexFetcher; if (fetcher != null) { Properties props = 
loadReplicationProperties(); - if (showSlaveDetails) { + if (showSecondaryDetails) { try { @SuppressWarnings({"rawtypes"}) NamedList nl = fetcher.getDetails(); - slave.add("masterDetails", nl.get(CMD_DETAILS)); + secondary.add("primaryDetails", nl.get(CMD_DETAILS)); } catch (Exception e) { log.warn( - "Exception while invoking 'details' method for replication on master ", + "Exception while invoking 'details' method for replication on primary ", e); - slave.add(ERR_STATUS, "invalid_master"); + secondary.add(ERR_STATUS, "invalid_primary"); } } - slave.add(MASTER_URL, fetcher.getMasterUrl()); + secondary.add(PRIMARY_URL, fetcher.getPrimaryUrl()); if (getPollInterval() != null) { - slave.add(POLL_INTERVAL, getPollInterval()); + secondary.add(POLL_INTERVAL, getPollInterval()); } Date nextScheduled = getNextScheduledExecTime(); if (nextScheduled != null && !isPollingDisabled()) { - slave.add(NEXT_EXECUTION_AT, nextScheduled.toString()); + secondary.add(NEXT_EXECUTION_AT, nextScheduled.toString()); } else if (isPollingDisabled()) { - slave.add(NEXT_EXECUTION_AT, "Polling disabled"); + secondary.add(NEXT_EXECUTION_AT, "Polling disabled"); } - addVal(slave, IndexFetcher.INDEX_REPLICATED_AT, props, Date.class); - addVal(slave, IndexFetcher.INDEX_REPLICATED_AT_LIST, props, List.class); - addVal(slave, IndexFetcher.REPLICATION_FAILED_AT_LIST, props, List.class); - addVal(slave, IndexFetcher.TIMES_INDEX_REPLICATED, props, Integer.class); - addVal(slave, IndexFetcher.CONF_FILES_REPLICATED, props, Integer.class); - addVal(slave, IndexFetcher.TIMES_CONFIG_REPLICATED, props, Integer.class); - addVal(slave, IndexFetcher.CONF_FILES_REPLICATED_AT, props, Integer.class); - addVal(slave, IndexFetcher.LAST_CYCLE_BYTES_DOWNLOADED, props, Long.class); - addVal(slave, IndexFetcher.TIMES_FAILED, props, Integer.class); - addVal(slave, IndexFetcher.REPLICATION_FAILED_AT, props, Date.class); - addVal(slave, IndexFetcher.PREVIOUS_CYCLE_TIME_TAKEN, props, Long.class); - addVal(slave, 
IndexFetcher.CLEARED_LOCAL_IDX, props, Long.class); - - slave.add("currentDate", new Date().toString()); - slave.add("isPollingDisabled", String.valueOf(isPollingDisabled())); + addVal(secondary, IndexFetcher.INDEX_REPLICATED_AT, props, Date.class); + addVal(secondary, IndexFetcher.INDEX_REPLICATED_AT_LIST, props, List.class); + addVal(secondary, IndexFetcher.REPLICATION_FAILED_AT_LIST, props, List.class); + addVal(secondary, IndexFetcher.TIMES_INDEX_REPLICATED, props, Integer.class); + addVal(secondary, IndexFetcher.CONF_FILES_REPLICATED, props, Integer.class); + addVal(secondary, IndexFetcher.TIMES_CONFIG_REPLICATED, props, Integer.class); + addVal(secondary, IndexFetcher.CONF_FILES_REPLICATED_AT, props, Integer.class); + addVal(secondary, IndexFetcher.LAST_CYCLE_BYTES_DOWNLOADED, props, Long.class); + addVal(secondary, IndexFetcher.TIMES_FAILED, props, Integer.class); + addVal(secondary, IndexFetcher.REPLICATION_FAILED_AT, props, Date.class); + addVal(secondary, IndexFetcher.PREVIOUS_CYCLE_TIME_TAKEN, props, Long.class); + addVal(secondary, IndexFetcher.CLEARED_LOCAL_IDX, props, Long.class); + + secondary.add("currentDate", new Date().toString()); + secondary.add("isPollingDisabled", String.valueOf(isPollingDisabled())); boolean isReplicating = isReplicating(); - slave.add("isReplicating", String.valueOf(isReplicating)); + secondary.add("isReplicating", String.valueOf(isReplicating)); if (isReplicating) { try { long bytesToDownload = 0; @@ -1027,9 +1027,9 @@ private NamedList getReplicationDetails(SolrQueryResponse rsp, boolean s bytesToDownload += (Long) file.get(SIZE); } - slave.add("filesToDownload", filesToDownload); - slave.add("numFilesToDownload", String.valueOf(filesToDownload.size())); - slave.add("bytesToDownload", NumberUtils.readableSize(bytesToDownload)); + secondary.add("filesToDownload", filesToDownload); + secondary.add("numFilesToDownload", String.valueOf(filesToDownload.size())); + secondary.add("bytesToDownload", 
NumberUtils.readableSize(bytesToDownload)); long bytesDownloaded = 0; List filesDownloaded = new ArrayList<>(); @@ -1058,17 +1058,17 @@ private NamedList getReplicationDetails(SolrQueryResponse rsp, boolean s percentDownloaded = (currFileSizeDownloaded * 100) / currFileSize; } } - slave.add("filesDownloaded", filesDownloaded); - slave.add("numFilesDownloaded", String.valueOf(filesDownloaded.size())); + secondary.add("filesDownloaded", filesDownloaded); + secondary.add("numFilesDownloaded", String.valueOf(filesDownloaded.size())); long estimatedTimeRemaining = 0; Date replicationStartTimeStamp = fetcher.getReplicationStartTimeStamp(); if (replicationStartTimeStamp != null) { - slave.add("replicationStartTime", replicationStartTimeStamp.toString()); + secondary.add("replicationStartTime", replicationStartTimeStamp.toString()); } long elapsed = fetcher.getReplicationTimeElapsed(); - slave.add("timeElapsed", String.valueOf(elapsed) + "s"); + secondary.add("timeElapsed", String.valueOf(elapsed) + "s"); if (bytesDownloaded > 0) estimatedTimeRemaining = ((bytesToDownload - bytesDownloaded) * elapsed) / bytesDownloaded; @@ -1079,24 +1079,24 @@ private NamedList getReplicationDetails(SolrQueryResponse rsp, boolean s if (elapsed > 0) downloadSpeed = (bytesDownloaded / elapsed); if (currFile != null) - slave.add("currentFile", currFile); - slave.add("currentFileSize", NumberUtils.readableSize(currFileSize)); - slave.add("currentFileSizeDownloaded", NumberUtils.readableSize(currFileSizeDownloaded)); - slave.add("currentFileSizePercent", String.valueOf(percentDownloaded)); - slave.add("bytesDownloaded", NumberUtils.readableSize(bytesDownloaded)); - slave.add("totalPercent", String.valueOf(totalPercent)); - slave.add("timeRemaining", String.valueOf(estimatedTimeRemaining) + "s"); - slave.add("downloadSpeed", NumberUtils.readableSize(downloadSpeed)); + secondary.add("currentFile", currFile); + secondary.add("currentFileSize", NumberUtils.readableSize(currFileSize)); + 
secondary.add("currentFileSizeDownloaded", NumberUtils.readableSize(currFileSizeDownloaded)); + secondary.add("currentFileSizePercent", String.valueOf(percentDownloaded)); + secondary.add("bytesDownloaded", NumberUtils.readableSize(bytesDownloaded)); + secondary.add("totalPercent", String.valueOf(totalPercent)); + secondary.add("timeRemaining", String.valueOf(estimatedTimeRemaining) + "s"); + secondary.add("downloadSpeed", NumberUtils.readableSize(downloadSpeed)); } catch (Exception e) { log.error("Exception while writing replication details: ", e); } } } - if (isMaster) - details.add("master", master); - if (slave.size() > 0) - details.add("slave", slave); + if (isPrimary) + details.add("primary", primary); + if (secondary.size() > 0) + details.add("secondary", secondary); @SuppressWarnings({"rawtypes"}) NamedList snapshotStats = snapShootDetails; @@ -1241,33 +1241,35 @@ public void inform(SolrCore core) { numberBackupsToKeep = 0; } @SuppressWarnings({"rawtypes"}) - NamedList slave = (NamedList) initArgs.get("slave"); - boolean enableSlave = isEnabled( slave ); - if (enableSlave) { - currentIndexFetcher = pollingIndexFetcher = new IndexFetcher(slave, this, core); - setupPolling((String) slave.get(POLL_INTERVAL)); - isSlave = true; + // this ternary operator should go away in the next version because legacy terminology has been deprecated + NamedList secondary = (NamedList)(initArgs.get("secondary") != null ? 
initArgs.get("secondary") : initArgs.get("slave")); + boolean enableSecondary = isEnabled( secondary ); + if (enableSecondary) { + currentIndexFetcher = pollingIndexFetcher = new IndexFetcher(secondary, this, core); + setupPolling((String) secondary.get(POLL_INTERVAL)); + isSecondary = true; } @SuppressWarnings({"rawtypes"}) - NamedList master = (NamedList) initArgs.get("master"); - boolean enableMaster = isEnabled( master ); + // this ternary operator should go away in the next version because legacy terminology has been deprecated + NamedList primary = (NamedList)(initArgs.get("primary") != null ? initArgs.get("primary") : initArgs.get("master")); + boolean enablePrimary = isEnabled( primary ); - if (enableMaster || (enableSlave && !currentIndexFetcher.fetchFromLeader)) { + if (enablePrimary || (enableSecondary && !currentIndexFetcher.fetchFromLeader)) { if (core.getCoreContainer().getZkController() != null) { log.warn("SolrCloud is enabled for core {} but so is old-style replication. " + "Make sure you intend this behavior, it usually indicates a mis-configuration. 
" - + "Master setting is {} and slave setting is {}" - , core.getName(), enableMaster, enableSlave); + + "Primary setting is {} and secondary setting is {}" + , core.getName(), enablePrimary, enableSecondary); } } - if (!enableSlave && !enableMaster) { - enableMaster = true; - master = new NamedList<>(); + if (!enableSecondary && !enablePrimary) { + enablePrimary = true; + primary = new NamedList<>(); } - if (enableMaster) { - includeConfFiles = (String) master.get(CONF_FILES); + if (enablePrimary) { + includeConfFiles = (String) primary.get(CONF_FILES); if (includeConfFiles != null && includeConfFiles.trim().length() > 0) { List files = Arrays.asList(includeConfFiles.split(",")); for (String file : files) { @@ -1279,11 +1281,11 @@ public void inform(SolrCore core) { log.info("Replication enabled for following config files: {}", includeConfFiles); } @SuppressWarnings({"rawtypes"}) - List backup = master.getAll("backupAfter"); + List backup = primary.getAll("backupAfter"); boolean backupOnCommit = backup.contains("commit"); boolean backupOnOptimize = !backupOnCommit && backup.contains("optimize"); @SuppressWarnings({"rawtypes"}) - List replicateAfter = master.getAll(REPLICATE_AFTER); + List replicateAfter = primary.getAll(REPLICATE_AFTER); replicateOnCommit = replicateAfter.contains("commit"); replicateOnOptimize = !replicateOnCommit && replicateAfter.contains("optimize"); @@ -1351,7 +1353,7 @@ public void inform(SolrCore core) { if (s!=null) s.decref(); } } - isMaster = true; + isPrimary = true; } { @@ -1363,7 +1365,7 @@ public void inform(SolrCore core) { log.info("Commits will be reserved for {} ms", reserveCommitDuration); } - // check master or slave is enabled + // check primary or secondary is enabled private boolean isEnabled( @SuppressWarnings({"rawtypes"})NamedList params ){ if( params == null ) return false; Object enable = params.get( "enable" ); @@ -1768,13 +1770,13 @@ private static Long readIntervalNs(String interval) { private static final String 
EXCEPTION = "exception"; - public static final String MASTER_URL = "masterUrl"; + public static final String PRIMARY_URL = "primaryUrl"; public static final String FETCH_FROM_LEADER = "fetchFromLeader"; - // in case of TLOG replica, if masterVersion = zero, don't do commit + // in case of TLOG replica, if primaryVersion = zero, don't do commit // otherwise updates from current tlog won't copied over properly to the new tlog, leading to data loss - public static final String SKIP_COMMIT_ON_MASTER_VERSION_ZERO = "skipCommitOnMasterVersionZero"; + public static final String SKIP_COMMIT_ON_MASTER_VERSION_ZERO = "skipCommitOnPrimaryVersionZero"; public static final String STATUS = "status"; diff --git a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java index 698525a00eb0..471dd350afbd 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java @@ -1033,9 +1033,9 @@ private void refinePivotFacets(ResponseBuilder rb, ShardRequest sreq) { } for (Entry>> pivotFacetResponseFromShard : pivotFacetResponsesFromShard) { - PivotFacet masterPivotFacet = fi.pivotFacets.get(pivotFacetResponseFromShard.getKey()); - masterPivotFacet.mergeResponseFromShard(shardNumber, rb, pivotFacetResponseFromShard.getValue()); - masterPivotFacet.removeAllRefinementsForShard(shardNumber); + PivotFacet primaryPivotFacet = fi.pivotFacets.get(pivotFacetResponseFromShard.getKey()); + primaryPivotFacet.mergeResponseFromShard(shardNumber, rb, pivotFacetResponseFromShard.getValue()); + primaryPivotFacet.removeAllRefinementsForShard(shardNumber); } } diff --git a/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java b/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java index 574cf05b627f..4527d3ac9de3 100644 --- 
a/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java +++ b/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java @@ -182,7 +182,7 @@ protected AbstractStatsValues(StatsField statsField) { // "NumericValueSourceStatsValues" which would have diff parent classes // // part of the complexity here being that the StatsValues API serves two - // masters: collecting concrete Values from things like DocValuesStats and + // primaries: collecting concrete Values from things like DocValuesStats and // the distributed aggregation logic, but also collecting docIds which it // then // uses to go out and pull concreate values from the ValueSource diff --git a/solr/core/src/java/org/apache/solr/search/stats/ExactSharedStatsCache.java b/solr/core/src/java/org/apache/solr/search/stats/ExactSharedStatsCache.java index 01c479bb81b5..7ab803772008 100644 --- a/solr/core/src/java/org/apache/solr/search/stats/ExactSharedStatsCache.java +++ b/solr/core/src/java/org/apache/solr/search/stats/ExactSharedStatsCache.java @@ -40,7 +40,7 @@ public class ExactSharedStatsCache extends ExactStatsCache { // local stats obtained from shard servers private final Map> perShardTermStats = new ConcurrentHashMap<>(); private final Map> perShardColStats = new ConcurrentHashMap<>(); - // global stats synchronized from the master + // global stats synchronized from the primary private final Map currentGlobalTermStats = new ConcurrentHashMap<>(); private final Map currentGlobalColStats = new ConcurrentHashMap<>(); diff --git a/solr/core/src/java/org/apache/solr/search/stats/LRUStatsCache.java b/solr/core/src/java/org/apache/solr/search/stats/LRUStatsCache.java index 7e94f5651268..5b21d8f49170 100644 --- a/solr/core/src/java/org/apache/solr/search/stats/LRUStatsCache.java +++ b/solr/core/src/java/org/apache/solr/search/stats/LRUStatsCache.java @@ -65,7 +65,7 @@ public class LRUStatsCache extends ExactStatsCache { // map of > private final Map> perShardColStats = 
new ConcurrentHashMap<>(); - // global stats synchronized from the master + // global stats synchronized from the primary // cache of private final CaffeineCache currentGlobalTermStats = new CaffeineCache<>(); diff --git a/solr/core/src/java/org/apache/solr/search/stats/StatsCache.java b/solr/core/src/java/org/apache/solr/search/stats/StatsCache.java index 238bb1257bbd..9f156ccc602c 100644 --- a/solr/core/src/java/org/apache/solr/search/stats/StatsCache.java +++ b/solr/core/src/java/org/apache/solr/search/stats/StatsCache.java @@ -176,7 +176,7 @@ public void mergeToGlobalStats(SolrQueryRequest req, protected abstract void doMergeToGlobalStats(SolrQueryRequest req, List responses); /** - * Receive global stats data from the master and update a local cache of global stats + * Receive global stats data from the primary and update a local cache of global stats * with this global data. This event occurs either as a separate request, or * together with the regular query request, in which case this method is * called first, before preparing a {@link QueryCommand} to be submitted to diff --git a/solr/core/src/java/org/apache/solr/security/JWTVerificationkeyResolver.java b/solr/core/src/java/org/apache/solr/security/JWTVerificationkeyResolver.java index 3aca77cd3a99..3dd7f9ccccdc 100644 --- a/solr/core/src/java/org/apache/solr/security/JWTVerificationkeyResolver.java +++ b/solr/core/src/java/org/apache/solr/security/JWTVerificationkeyResolver.java @@ -106,7 +106,7 @@ public Key resolveKey(JsonWebSignature jws, List nestingContex } } - // Add all keys into a master list + // Add all keys into a reference list if (issuerConfig.usesHttpsJwk()) { keysSource = "[" + String.join(", ", issuerConfig.getJwksUrls()) + "]"; for (HttpsJwks hjwks : issuerConfig.getHttpsJwks()) { diff --git a/solr/core/src/java/org/apache/solr/util/TestInjection.java b/solr/core/src/java/org/apache/solr/util/TestInjection.java index 651d26c1d808..c0a5a080adb5 100644 --- 
a/solr/core/src/java/org/apache/solr/util/TestInjection.java +++ b/solr/core/src/java/org/apache/solr/util/TestInjection.java @@ -143,7 +143,7 @@ static Random random() { // non-private for testing private volatile static AtomicInteger countPrepRecoveryOpPauseForever = new AtomicInteger(0); - public volatile static Integer delayBeforeSlaveCommitRefresh=null; + public volatile static Integer delayBeforeSecondaryCommitRefresh=null; public volatile static Integer delayInExecutePlanAction=null; @@ -185,7 +185,7 @@ public static void reset() { countPrepRecoveryOpPauseForever = new AtomicInteger(0); failIndexFingerprintRequests = null; wrongIndexFingerprint = null; - delayBeforeSlaveCommitRefresh = null; + delayBeforeSecondaryCommitRefresh = null; delayInExecutePlanAction = null; failInExecutePlanAction = false; skipIndexWriterCommitOnClose = false; @@ -521,11 +521,11 @@ private static Pair parseValue(final String raw) { return new Pair<>(Boolean.parseBoolean(val), Integer.parseInt(percent)); } - public static boolean injectDelayBeforeSlaveCommitRefresh() { - if (delayBeforeSlaveCommitRefresh!=null) { + public static boolean injectDelayBeforeSecondaryCommitRefresh() { + if (delayBeforeSecondaryCommitRefresh!=null) { try { - log.info("Pausing IndexFetcher for {}ms", delayBeforeSlaveCommitRefresh); - Thread.sleep(delayBeforeSlaveCommitRefresh); + log.info("Pausing IndexFetcher for {}ms", delayBeforeSecondaryCommitRefresh); + Thread.sleep(delayBeforeSecondaryCommitRefresh); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master-throttled.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-primary-throttled.xml similarity index 100% rename from solr/core/src/test-files/solr/collection1/conf/solrconfig-master-throttled.xml rename to solr/core/src/test-files/solr/collection1/conf/solrconfig-primary-throttled.xml diff --git 
a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-primary.xml similarity index 99% rename from solr/core/src/test-files/solr/collection1/conf/solrconfig-master.xml rename to solr/core/src/test-files/solr/collection1/conf/solrconfig-primary.xml index e501af2bee48..7209faba3123 100644 --- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master.xml +++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-primary.xml @@ -33,7 +33,7 @@ - + commit - http://remote_host:port/solr/core_name/replication + http://remote_host:port/solr/core_name/replication - @@ -158,13 +158,13 @@ The code below shows how to configure a ReplicationHandler on a slave. internal - @@ -172,7 +172,7 @@ The code below shows how to configure a ReplicationHandler on a slave. 5000 10000 - username @@ -183,23 +183,23 @@ The code below shows how to configure a ReplicationHandler on a slave. == Setting Up a Repeater with the ReplicationHandler -A master may be able to serve only so many slaves without affecting performance. Some organizations have deployed slave servers across multiple data centers. If each slave downloads the index from a remote data center, the resulting download may consume too much network bandwidth. To avoid performance degradation in cases like this, you can configure one or more slaves as repeaters. A repeater is simply a node that acts as both a master and a slave. +A primary server may be able to serve only so many secondaries without affecting performance. Some organizations have deployed secondary servers across multiple data centers. If each secondary server downloads the index from a remote data center, the resulting download may consume too much network bandwidth. To avoid performance degradation in cases like this, you can configure one or more secondaries as repeaters. A repeater is simply a node that acts as both a primary server and a secondary server. 
-* To configure a server as a repeater, the definition of the Replication `requestHandler` in the `solrconfig.xml` file must include file lists of use for both masters and slaves. -* Be sure to set the `replicateAfter` parameter to commit, even if `replicateAfter` is set to optimize on the main master. This is because on a repeater (or any slave), a commit is called only after the index is downloaded. The optimize command is never called on slaves. -* Optionally, one can configure the repeater to fetch compressed files from the master through the compression parameter to reduce the index download time. +* To configure a server as a repeater, the definition of the Replication `requestHandler` in the `solrconfig.xml` file must include file lists of use for both primaries and secondaries. +* Be sure to set the `replicateAfter` parameter to commit, even if `replicateAfter` is set to optimize on the main primary server. This is because on a repeater (or any secondary server), a commit is called only after the index is downloaded. The optimize command is never called on secondaries. +* Optionally, one can configure the repeater to fetch compressed files from the primary server through the compression parameter to reduce the index download time. Here is an example of a ReplicationHandler configuration for a repeater: [source,xml] ---- - + commit schema.xml,stopwords.txt,synonyms.txt - - http://master.solr.company.com:8983/solr/core_name/replication + + http://primary.solr.company.com:8983/solr/core_name/replication 00:00:60 @@ -207,13 +207,13 @@ Here is an example of a ReplicationHandler configuration for a repeater: == Commit and Optimize Operations -When a commit or optimize operation is performed on the master, the RequestHandler reads the list of file names which are associated with each commit point. This relies on the `replicateAfter` parameter in the configuration to decide which types of events should trigger replication. 
+When a commit or optimize operation is performed on the primary server, the RequestHandler reads the list of file names which are associated with each commit point. This relies on the `replicateAfter` parameter in the configuration to decide which types of events should trigger replication. These operations are supported: -* `commit`: Triggers replication whenever a commit is performed on the master index. -* `optimize`: Triggers replication whenever the master index is optimized. -* `startup`: Triggers replication whenever the master index starts up. +* `commit`: Triggers replication whenever a commit is performed on the primary index. +* `optimize`: Triggers replication whenever the primary index is optimized. +* `startup`: Triggers replication whenever the primary index starts up. The `replicateAfter` parameter can accept multiple arguments. For example: @@ -224,91 +224,91 @@ The `replicateAfter` parameter can accept multiple arguments. For example: optimize ---- -== Slave Replication +== Secondary Replication -The master is totally unaware of the slaves. +The primary server is totally unaware of the secondaries. -The slave continuously keeps polling the master (depending on the `pollInterval` parameter) to check the current index version of the master. If the slave finds out that the master has a newer version of the index it initiates a replication process. The steps are as follows: +The secondary server continuously keeps polling the primary server (depending on the `pollInterval` parameter) to check the current index version of the primary server. If the secondary server finds out that the primary server has a newer version of the index it initiates a replication process. The steps are as follows: -* The slave issues a `filelist` command to get the list of the files. This command returns the names of the files as well as some metadata (for example, size, a lastmodified timestamp, an alias if any). 
-* The slave checks with its own index if it has any of those files in the local index. It then runs the filecontent command to download the missing files. This uses a custom format (akin to the HTTP chunked encoding) to download the full content or a part of each file. If the connection breaks in between, the download resumes from the point it failed. At any point, the slave tries 5 times before giving up a replication altogether. -* The files are downloaded into a temp directory, so that if either the slave or the master crashes during the download process, no files will be corrupted. Instead, the current replication will simply abort. -* After the download completes, all the new files are moved to the live index directory and the file's timestamp is same as its counterpart on the master. -* A commit command is issued on the slave by the Slave's ReplicationHandler and the new index is loaded. +* The secondary server issues a `filelist` command to get the list of the files. This command returns the names of the files as well as some metadata (for example, size, a lastmodified timestamp, an alias if any). +* The secondary server checks with its own index if it has any of those files in the local index. It then runs the filecontent command to download the missing files. This uses a custom format (akin to the HTTP chunked encoding) to download the full content or a part of each file. If the connection breaks in between, the download resumes from the point it failed. At any point, the secondary server tries 5 times before giving up a replication altogether. +* The files are downloaded into a temp directory, so that if either the secondary server or the primary server crashes during the download process, no files will be corrupted. Instead, the current replication will simply abort. +* After the download completes, all the new files are moved to the live index directory and the file's timestamp is the same as its counterpart on the primary. 
+* A commit command is issued on the secondary server by the Secondary's ReplicationHandler and the new index is loaded. === Replicating Configuration Files -To replicate configuration files, list them using using the `confFiles` parameter. Only files found in the `conf` directory of the master's Solr instance will be replicated. +To replicate configuration files, list them using the `confFiles` parameter. Only files found in the `conf` directory of the primary's Solr instance will be replicated. -Solr replicates configuration files only when the index itself is replicated. That means even if a configuration file is changed on the master, that file will be replicated only after there is a new commit/optimize on master's index. +Solr replicates configuration files only when the index itself is replicated. That means even if a configuration file is changed on the primary, that file will be replicated only after there is a new commit/optimize on primary's index. -Unlike the index files, where the timestamp is good enough to figure out if they are identical, configuration files are compared against their checksum. The `schema.xml` files (on master and slave) are judged to be identical if their checksums are identical. +Unlike the index files, where the timestamp is good enough to figure out if they are identical, configuration files are compared against their checksum. The `schema.xml` files (on primary and secondary servers) are judged to be identical if their checksums are identical. As a precaution when replicating configuration files, Solr copies configuration files to a temporary directory before moving them into their ultimate location in the conf directory. The old configuration files are then renamed and kept in the same `conf/` directory. The ReplicationHandler does not automatically clean up these old files. 
If a replication involved downloading of at least one configuration file, the ReplicationHandler issues a core-reload command instead of a commit command. -=== Resolving Corruption Issues on Slave Servers +=== Resolving Corruption Issues on Secondary Servers -If documents are added to the slave, then the slave is no longer in sync with its master. However, the slave will not undertake any action to put itself in sync, until the master has new index data. +If documents are added to the secondary server, then the secondary server is no longer in sync with its primary. However, the secondary server will not undertake any action to put itself in sync, until the primary has new index data. -When a commit operation takes place on the master, the index version of the master becomes different from that of the slave. The slave then fetches the list of files and finds that some of the files present on the master are also present in the local index but with different sizes and timestamps. This means that the master and slave have incompatible indexes. +When a commit operation takes place on the primary, the index version of the primary server becomes different from that of the secondary server. The secondary server then fetches the list of files and finds that some of the files present on the primary are also present in the local index but with different sizes and timestamps. This means that the primary and secondary server have incompatible indexes. -To correct this problem, the slave then copies all the index files from master to a new index directory and asks the core to load the fresh index from the new directory. +To correct this problem, the secondary server then copies all the index files from primary to a new index directory and asks the core to load the fresh index from the new directory. == HTTP API Commands for the ReplicationHandler You can use the HTTP commands below to control the ReplicationHandler's operations. 
`enablereplication`:: -Enable replication on the "master" for all its slaves. +Enable replication on the "primary" for all its secondaries. + [source,bash] -http://_master_host:port_/solr/_core_name_/replication?command=enablereplication +http://_primary_host:port_/solr/_core_name_/replication?command=enablereplication `disablereplication`:: -Disable replication on the master for all its slaves. +Disable replication on the primary for all its secondaries. + [source,bash] -http://_master_host:port_/solr/_core_name_/replication?command=disablereplication +http://_primary_host:port_/solr/_core_name_/replication?command=disablereplication `indexversion`:: -Return the version of the latest replicatable index on the specified master or slave. +Return the version of the latest replicatable index on the specified primary or secondary server. + [source,bash] http://_host:port_/solr/_core_name_/replication?command=indexversion `fetchindex`:: -Force the specified slave to fetch a copy of the index from its master. +Force the specified secondary server to fetch a copy of the index from its primary. + [source.bash] -http://_slave_host:port_/solr/_core_name_/replication?command=fetchindex +http://_secondary_host:port_/solr/_core_name_/replication?command=fetchindex + -If you like, you can pass an extra attribute such as `masterUrl` or `compression` (or any other parameter which is specified in the `` tag) to do a one time replication from a master. This obviates the need for hard-coding the master in the slave. +If you like, you can pass an extra attribute such as `primaryUrl` or `compression` (or any other parameter which is specified in the `` tag) to do a one time replication from a primary. This obviates the need for hard-coding the primary server location in the secondary server. `abortfetch`:: -Abort copying an index from a master to the specified slave. +Abort copying an index from a primary server to the specified secondary server. 
+ [source,bash] -http://_slave_host:port_/solr/_core_name_/replication?command=abortfetch +http://_secondary_host:port_/solr/_core_name_/replication?command=abortfetch `enablepoll`:: -Enable the specified slave to poll for changes on the master. +Enable the specified secondary to poll for changes on the primary. + [source,bash] -http://_slave_host:port_/solr/_core_name_/replication?command=enablepoll +http://_secondary_host:port_/solr/_core_name_/replication?command=enablepoll `disablepoll`:: -Disable the specified slave from polling for changes on the master. +Disable the specified secondary server from polling for changes on the primary server. + [source,bash] -http://_slave_host:port_/solr/_core_name_/replication?command=disablepoll +http://_secondary_host:port_/solr/_core_name_/replication?command=disablepoll `details`:: Retrieve configuration details and current status. + [source,bash] -http://_slave_host:port_/solr/_core_name_/replication?command=details +http://_secondary_host:port_/solr/_core_name_/replication?command=details `filelist`:: Retrieve a list of Lucene files present in the specified host's index. @@ -319,10 +319,10 @@ http://_host:port_/solr/_core_name_/replication?command=filelist&generation=<_ge You can discover the generation number of the index by running the `indexversion` command. `backup`:: -Create a backup on master if there are committed index data in the server; otherwise, does nothing. +Create a backup on primary if there are committed index data in the server; otherwise, does nothing. + [source,bash] -http://_master_host:port_/solr/_core_name_/replication?command=backup +http://_primary_host:port_/solr/_core_name_/replication?command=backup + This command is useful for making periodic backups. There are several supported request parameters: + @@ -335,7 +335,7 @@ This command is useful for making periodic backups. There are several supported Restore a backup from a backup repository. 
+ [source,bash] -http://_master_host:port_/solr/_core_name_/replication?command=restore +http://_primary_host:port_/solr/_core_name_/replication?command=restore + This command is used to restore a backup. There are several supported request parameters: + @@ -347,7 +347,7 @@ This command is used to restore a backup. There are several supported request pa Check the status of a running restore operation. + [source,bash] -http://_master_host:port_/solr/_core_name_/replication?command=restorestatus +http://_primary_host:port_/solr/_core_name_/replication?command=restorestatus + This command is used to check the status of a restore operation. This command takes no parameters. + @@ -357,7 +357,7 @@ The status value can be "In Progress" , "success" or "failed". If it failed then Delete any backup created using the `backup` command. + [source,bash] -http://_master_host:port_ /solr/_core_name_/replication?command=deletebackup +http://_primary_host:port_ /solr/_core_name_/replication?command=deletebackup + There are two supported parameters: @@ -369,15 +369,15 @@ There are two supported parameters: Optimizing an index is not something most users should generally worry about - but in particular users should be aware of the impacts of optimizing an index when using the `ReplicationHandler`. -The time required to optimize a master index can vary dramatically. A small index may be optimized in minutes. A very large index may take hours. The variables include the size of the index and the speed of the hardware. +The time required to optimize a primary index can vary dramatically. A small index may be optimized in minutes. A very large index may take hours. The variables include the size of the index and the speed of the hardware. -Distributing a newly optimized index may take only a few minutes or up to an hour or more, again depending on the size of the index and the performance capabilities of network connections and disks. 
During optimization the machine is under load and does not process queries very well. Given a schedule of updates being driven a few times an hour to the slaves, we cannot run an optimize with every committed snapshot. +Distributing a newly optimized index may take only a few minutes or up to an hour or more, again depending on the size of the index and the performance capabilities of network connections and disks. During optimization the machine is under load and does not process queries very well. Given a schedule of updates being driven a few times an hour to the secondaries, we cannot run an optimize with every committed snapshot. Copying an optimized index means that the *entire* index will need to be transferred during the next `snappull`. This is a large expense, but not nearly as huge as running the optimize everywhere. -Consider this example: on a three-slave one-master configuration, distributing a newly-optimized index takes approximately 80 seconds _total_. Rolling the change across a tier would require approximately ten minutes per machine (or machine group). If this optimize were rolled across the query tier, and if each slave node being optimized were disabled and not receiving queries, a rollout would take at least twenty minutes and potentially as long as an hour and a half. Additionally, the files would need to be synchronized so that the _following_ the optimize, `snappull` would not think that the independently optimized files were different in any way. This would also leave the door open to independent corruption of indexes instead of each being a perfect copy of the master. +Consider this example: on a three-secondary server one-primary server configuration, distributing a newly-optimized index takes approximately 80 seconds _total_. Rolling the change across a tier would require approximately ten minutes per machine (or machine group). 
If this optimize were rolled across the query tier, and if each secondary node being optimized were disabled and not receiving queries, a rollout would take at least twenty minutes and potentially as long as an hour and a half. Additionally, the files would need to be synchronized so that, _following_ the optimize, `snappull` would not think that the independently optimized files were different in any way. This would also leave the door open to independent corruption of indexes instead of each being a perfect copy of the primary. -Optimizing on the master allows for a straight-forward optimization operation. No query slaves need to be taken out of service. The optimized index can be distributed in the background as queries are being normally serviced. The optimization can occur at any time convenient to the application providing index updates. +Optimizing on the primary allows for a straight-forward optimization operation. No query secondaries need to be taken out of service. The optimized index can be distributed in the background as queries are being normally serviced. The optimization can occur at any time convenient to the application providing index updates. While optimizing may have some benefits in some situations, a rapidly changing index will not retain those benefits for long, and since optimization is an intensive process, it may be better to consider other options, such as lowering the merge factor (discussed in the section on <>). 
diff --git a/solr/solr-ref-guide/src/major-changes-in-solr-8.adoc b/solr/solr-ref-guide/src/major-changes-in-solr-8.adoc index 13184f86c087..ba382f7d3932 100644 --- a/solr/solr-ref-guide/src/major-changes-in-solr-8.adoc +++ b/solr/solr-ref-guide/src/major-changes-in-solr-8.adoc @@ -390,7 +390,7 @@ When upgrading to Solr 7.4, users should be aware of the following major changes *Legacy Scaling (non-SolrCloud)* -* In the <> of scaling Solr, a slave no longer commits an empty index when a completely new index is detected on master during replication. To return to the previous behavior pass `false` to `skipCommitOnMasterVersionZero` in the slave section of replication handler configuration, or pass it to the `fetchindex` command. +* In the <> of scaling Solr, a secondary server no longer commits an empty index when a completely new index is detected on a primary server during replication. To return to the previous behavior pass `false` to `skipCommitOnPrimaryVersionZero` in the secondary section of replication handler configuration, or pass it to the `fetchindex` command. If you are upgrading from a version earlier than Solr 7.3, please see previous version notes below. @@ -493,7 +493,7 @@ See the section <> has supplanted much of this functionality, but if you are still using Master-Slave index replication, you can use this screen to: +The Replication screen shows you the current replication state for the core you have specified. <> has supplanted much of this functionality, but if you are still using Primary-Secondary index replication, you can use this screen to: . View the replicatable index state. (on a master node) -. View the current replication status (on a slave node) +. View the current replication status (on a secondary node) . Disable replication. 
(on a master node) .Caution When Using SolrCloud diff --git a/solr/solr-ref-guide/src/shards-and-indexing-data-in-solrcloud.adoc b/solr/solr-ref-guide/src/shards-and-indexing-data-in-solrcloud.adoc index 3aa07cbdae77..b42c47b02dce 100644 --- a/solr/solr-ref-guide/src/shards-and-indexing-data-in-solrcloud.adoc +++ b/solr/solr-ref-guide/src/shards-and-indexing-data-in-solrcloud.adoc @@ -32,7 +32,7 @@ SolrCloud addresses those limitations. There is support for distributing both th == Leaders and Replicas -In SolrCloud there are no masters or slaves. Instead, every shard consists of at least one physical *replica*, exactly one of which is a *leader*. Leaders are automatically elected, initially on a first-come-first-served basis, and then based on the ZooKeeper process described at http://zookeeper.apache.org/doc/r{ivy-zookeeper-version}/recipes.html#sc_leaderElection. +In SolrCloud there are no primaries or secondaries. Instead, every shard consists of at least one physical *replica*, exactly one of which is a *leader*. Leaders are automatically elected, initially on a first-come-first-served basis, and then based on the ZooKeeper process described at http://zookeeper.apache.org/doc/r{ivy-zookeeper-version}/recipes.html#sc_leaderElection. If a leader goes down, one of the other replicas is automatically elected as the new leader. diff --git a/solr/solr-ref-guide/src/solr-glossary.adoc b/solr/solr-ref-guide/src/solr-glossary.adoc index 5c471af54c93..e214b51edc01 100644 --- a/solr/solr-ref-guide/src/solr-glossary.adoc +++ b/solr/solr-ref-guide/src/solr-glossary.adoc @@ -144,7 +144,7 @@ A <> that acts as a physical copy of a <> in a <>:: -A method of copying a master index from one server to one or more "slave" or "child" servers. +A method of copying a primary index from one server to one or more "secondary" or "child" servers. 
[[requesthandler]]<>:: Logic and configuration parameters that tell Solr how to handle incoming "requests", whether the requests are to return search results, to index documents, or to handle other custom situations. diff --git a/solr/solr-ref-guide/src/updatehandlers-in-solrconfig.adoc b/solr/solr-ref-guide/src/updatehandlers-in-solrconfig.adoc index 4c0deeed14b4..5e4b869991f1 100644 --- a/solr/solr-ref-guide/src/updatehandlers-in-solrconfig.adoc +++ b/solr/solr-ref-guide/src/updatehandlers-in-solrconfig.adoc @@ -78,7 +78,7 @@ You can also specify 'soft' autoCommits in the same way that you can specify 'so === commitWithin -The `commitWithin` settings allow forcing document commits to happen in a defined time period. This is used most frequently with <>, and for that reason the default is to perform a soft commit. This does not, however, replicate new documents to slave servers in a master/slave environment. If that's a requirement for your implementation, you can force a hard commit by adding a parameter, as in this example: +The `commitWithin` settings allow forcing document commits to happen in a defined time period. This is used most frequently with <>, and for that reason the default is to perform a soft commit. This does not, however, replicate new documents to secondary servers in a primary/secondary environment. If that's a requirement for your implementation, you can force a hard commit by adding a parameter, as in this example: [source,xml] ---- diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttp2SolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttp2SolrClient.java index 96f96bf5cb88..957b2429e39c 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttp2SolrClient.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttp2SolrClient.java @@ -40,8 +40,8 @@ * {@link Http2SolrClient}. 
This is useful when you * have multiple Solr servers and the requests need to be Load Balanced among them. * - * Do NOT use this class for indexing in master/slave scenarios since documents must be sent to the - * correct master; no inter-node routing is done. + * Do NOT use this class for indexing in primary/secondary scenarios since documents must be sent to the + * correct primary; no inter-node routing is done. * * In SolrCloud (leader/replica) scenarios, it is usually better to use * {@link CloudSolrClient}, but this class may be used diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java index bc4efbbf9f40..99cf0713eba8 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java @@ -33,8 +33,8 @@ * {@link HttpSolrClient}. This is useful when you * have multiple Solr servers and the requests need to be Load Balanced among them. * - * Do NOT use this class for indexing in master/slave scenarios since documents must be sent to the - * correct master; no inter-node routing is done. + * Do NOT use this class for indexing in primary/secondary scenarios since documents must be sent to the + * correct primary; no inter-node routing is done. 
* * In SolrCloud (leader/replica) scenarios, it is usually better to use * {@link CloudSolrClient}, but this class may be used diff --git a/solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig-slave1.xml b/solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig-secondary1.xml similarity index 100% rename from solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig-slave1.xml rename to solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig-secondary1.xml diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttp2SolrClient.java b/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttp2SolrClient.java index ffe52febf7ff..9100866de436 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttp2SolrClient.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttp2SolrClient.java @@ -279,7 +279,7 @@ public String getDataDir() { } public String getSolrConfigFile() { - return "solrj/solr/collection1/conf/solrconfig-slave1.xml"; + return "solrj/solr/collection1/conf/solrconfig-secondary1.xml"; } public String getSolrXmlFile() { diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java b/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java index 79381a060992..992545e05cb8 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java @@ -280,7 +280,7 @@ public String getDataDir() { } public String getSolrConfigFile() { - return "solrj/solr/collection1/conf/solrconfig-slave1.xml"; + return "solrj/solr/collection1/conf/solrconfig-secondary1.xml"; } public String getSolrXmlFile() { diff --git a/solr/webapp/web/css/angular/collections.css b/solr/webapp/web/css/angular/collections.css index a0c52ff8b6a4..5104d4f9c6cb 100644 --- a/solr/webapp/web/css/angular/collections.css +++ b/solr/webapp/web/css/angular/collections.css @@ -228,7 +228,7 @@ 
limitations under the License. #content #collections #data #alias-data h2 { background-image: url( ../../img/ico/box.png ); } #content #collections #data #collection-data h2 { background-image: url( ../../img/ico/box.png ); } #content #collections #data #shard-data h2 { background-image: url( ../../img/ico/sitemap.png ); } -#content #collections #data #shard-data .replica h2 { background-image: url( ../../img/ico/node-slave.png ); } +#content #collections #data #shard-data .replica h2 { background-image: url( ../../img/ico/node-secondary.png ); } #content #collections #data #index-data { diff --git a/solr/webapp/web/css/angular/dashboard.css b/solr/webapp/web/css/angular/dashboard.css index 734d62a9de68..82948938fa3d 100644 --- a/solr/webapp/web/css/angular/dashboard.css +++ b/solr/webapp/web/css/angular/dashboard.css @@ -144,8 +144,8 @@ limitations under the License. #content #dashboard #system h2 { background-image: url( ../../img/ico/server.png ); } #content #dashboard #statistics h2 { background-image: url( ../../img/ico/chart.png ); } #content #dashboard #replication h2 { background-image: url( ../../img/ico/node.png ); } -#content #dashboard #replication.master h2 { background-image: url( ../../img/ico/node-master.png ); } -#content #dashboard #replication.slave h2 { background-image: url( ../../img/ico/node-slave.png ); } +#content #dashboard #replication.primary h2 { background-image: url( ../../img/ico/node-primary.png ); } +#content #dashboard #replication.secondary h2 { background-image: url( ../../img/ico/node-secondary.png ); } #content #dashboard #instance h2 { background-image: url( ../../img/ico/server.png ); } #content #dashboard #collection h2 { background-image: url( ../../img/ico/book-open-text.png ); } #content #dashboard #shards h2 { background-image: url( ../../img/ico/documents-stack.png ); } diff --git a/solr/webapp/web/css/angular/menu.css b/solr/webapp/web/css/angular/menu.css index 87b51695c7ab..8ccf5796f937 100644 --- 
a/solr/webapp/web/css/angular/menu.css +++ b/solr/webapp/web/css/angular/menu.css @@ -268,7 +268,7 @@ limitations under the License. #menu #cloud.global p a { background-image: url( ../../img/ico/network-cloud.png ); } #menu #cloud.global .tree a { background-image: url( ../../img/ico/folder-tree.png ); } #menu #cloud.global .nodes a { background-image: url( ../../img/solr-ico.png ); } -#menu #cloud.global .zkstatus a { background-image: url( ../../img/ico/node-master.png ); } +#menu #cloud.global .zkstatus a { background-image: url( ../../img/ico/node-primary.png ); } #menu #cloud.global .graph a { background-image: url( ../../img/ico/molecule.png ); } .sub-menu .ping.error a diff --git a/solr/webapp/web/css/angular/replication.css b/solr/webapp/web/css/angular/replication.css index 4eb608878d4f..6aa1f8188add 100644 --- a/solr/webapp/web/css/angular/replication.css +++ b/solr/webapp/web/css/angular/replication.css @@ -61,17 +61,17 @@ limitations under the License. border-bottom: 0; } -#content #replication .masterOnly, -#content #replication .slaveOnly +#content #replication .primaryOnly, +#content #replication .secondaryOnly { } -#content #replication.master .masterOnly +#content #replication.primary .primaryOnly { display: block; } -#content #replication.slave .slaveOnly +#content #replication.secondary .secondaryOnly { display: block; } @@ -300,7 +300,7 @@ limitations under the License. 
text-align: left; } -#content #replication.slave #details table .slaveOnly +#content #replication.secondary #details table .secondaryOnly { display: table-row; } diff --git a/solr/webapp/web/img/ico/node-master.png b/solr/webapp/web/img/ico/node-primary.png similarity index 100% rename from solr/webapp/web/img/ico/node-master.png rename to solr/webapp/web/img/ico/node-primary.png diff --git a/solr/webapp/web/img/ico/node-slave.png b/solr/webapp/web/img/ico/node-secondary.png similarity index 100% rename from solr/webapp/web/img/ico/node-slave.png rename to solr/webapp/web/img/ico/node-secondary.png diff --git a/solr/webapp/web/js/angular/controllers/core-overview.js b/solr/webapp/web/js/angular/controllers/core-overview.js index 0e2b3d2a6210..a0b8ce0efa62 100644 --- a/solr/webapp/web/js/angular/controllers/core-overview.js +++ b/solr/webapp/web/js/angular/controllers/core-overview.js @@ -33,8 +33,8 @@ function($scope, $rootScope, $routeParams, Luke, CoreSystem, Update, Replication $scope.refreshReplication = function() { Replication.details({core: $routeParams.core}, function(data) { - $scope.isSlave = data.details.isSlave == "true"; - $scope.isMaster = data.details.isMaster == "true"; + $scope.isSecondary = data.details.isSecondary == "true"; + $scope.isPrimary = data.details.isPrimary == "true"; $scope.replication = data.details; }, function(error) { diff --git a/solr/webapp/web/js/angular/controllers/replication.js b/solr/webapp/web/js/angular/controllers/replication.js index 9f7ac3e41c46..f784d4b152fc 100644 --- a/solr/webapp/web/js/angular/controllers/replication.js +++ b/solr/webapp/web/js/angular/controllers/replication.js @@ -26,12 +26,12 @@ solrAdminApp.controller('ReplicationController', var timeout; var interval; if ($scope.interval) $interval.cancel($scope.interval); - $scope.isSlave = (response.details.isSlave === 'true'); - if ($scope.isSlave) { - $scope.progress = getProgressDetails(response.details.slave); - $scope.iterations = 
getIterations(response.details.slave); - $scope.versions = getSlaveVersions(response.details); - $scope.settings = getSlaveSettings(response.details); + $scope.isSecondary = (response.details.isSecondary === 'true'); + if ($scope.isSecondary) { + $scope.progress = getProgressDetails(response.details.secondary); + $scope.iterations = getIterations(response.details.secondary); + $scope.versions = getSecondaryVersions(response.details); + $scope.settings = getSecondarySettings(response.details); if ($scope.settings.isReplicating) { timeout = $timeout($scope.refresh, 1000); } else if(!$scope.settings.isPollingDisabled && $scope.settings.pollInterval) { @@ -41,9 +41,9 @@ solrAdminApp.controller('ReplicationController', timeout = $timeout($scope.refresh, 1000*(1+$scope.settings.tick)); } } else { - $scope.versions = getMasterVersions(response.details); + $scope.versions = getPrimaryVersions(response.details); } - $scope.master = getMasterSettings(response.details, $scope.isSlave); + $scope.primary = getPrimarySettings(response.details, $scope.isSecondary); var onRouteChangeOff = $scope.$on('$routeChangeStart', function() { if (interval) $interval.cancel(interval); @@ -85,7 +85,7 @@ var getProgressDetails = function(progress) { return progress; }; -var getIterations = function(slave) { +var getIterations = function(secondary) { var iterations = []; @@ -93,17 +93,17 @@ var getIterations = function(slave) { return list.filter(function(e) {return e.date == date}); }; - for (var i in slave.indexReplicatedAtList) { - var date = slave.indexReplicatedAtList[i]; + for (var i in secondary.indexReplicatedAtList) { + var date = secondary.indexReplicatedAtList[i]; var iteration = {date:date, status:"replicated", latest: false}; - if (date == slave.indexReplicatedAt) { + if (date == secondary.indexReplicatedAt) { iteration.latest = true; } iterations.push(iteration); } - for (var i in slave.replicationFailedAtList) { - var failedDate = slave.replicationFailedAtList[i]; + for (var i in 
secondary.replicationFailedAtList) { + var failedDate = secondary.replicationFailedAtList[i]; var matchingIterations = find(iterations, failedDate); if (matchingIterations[0]) { iteration = matchingIterations[0]; @@ -112,7 +112,7 @@ var getIterations = function(slave) { iteration = {date: failedDate, status:"failed", latest:false}; iterations.push(iteration); } - if (failedDate == slave.replicationFailedAt) { + if (failedDate == secondary.replicationFailedAt) { iteration.latest = true; } } @@ -120,37 +120,37 @@ var getIterations = function(slave) { return iterations; }; -var getMasterVersions = function(data) { - versions = {masterSearch:{}, master:{}}; +var getPrimaryVersions = function(data) { + versions = {primarySearch:{}, primary:{}}; - versions.masterSearch.version = data.indexVersion; - versions.masterSearch.generation = data.generation; - versions.masterSearch.size = data.indexSize; + versions.primarySearch.version = data.indexVersion; + versions.primarySearch.generation = data.generation; + versions.primarySearch.size = data.indexSize; - versions.master.version = data.master.replicableVersion || '-'; - versions.master.generation = data.master.replicableGeneration || '-'; - versions.master.size = '-'; + versions.primary.version = data.primary.replicableVersion || '-'; + versions.primary.generation = data.primary.replicableGeneration || '-'; + versions.primary.size = '-'; return versions; }; -var getSlaveVersions = function(data) { - versions = {masterSearch: {}, master: {}, slave: {}}; +var getSecondaryVersions = function(data) { + versions = {primarySearch: {}, primary: {}, secondary: {}}; - versions.slave.version = data.indexVersion; - versions.slave.generation = data.generation; - versions.slave.size = data.indexSize; + versions.secondary.version = data.indexVersion; + versions.secondary.generation = data.generation; + versions.secondary.size = data.indexSize; - versions.master.version = data.slave.masterDetails.replicableVersion || '-'; - 
versions.master.generation = data.slave.masterDetails.replicableGeneration || '-'; - versions.master.size = '-'; + versions.primary.version = data.secondary.primaryDetails.replicableVersion || '-'; + versions.primary.generation = data.secondary.primaryDetails.replicableGeneration || '-'; + versions.primary.size = '-'; - versions.masterSearch.version = data.slave.masterDetails.indexVersion; - versions.masterSearch.generation = data.slave.masterDetails.generation; - versions.masterSearch.size = data.slave.masterDetails.indexSize; + versions.primarySearch.version = data.secondary.primaryDetails.indexVersion; + versions.primarySearch.generation = data.secondary.primaryDetails.generation; + versions.primarySearch.size = data.secondary.primaryDetails.indexSize; - versions.changedVersion = data.indexVersion !== data.slave.masterDetails.indexVersion; - versions.changedGeneration = data.generation !== data.slave.masterDetails.generation; + versions.changedVersion = data.indexVersion !== data.secondary.primaryDetails.indexVersion; + versions.changedGeneration = data.generation !== data.secondary.primaryDetails.generation; return versions; }; @@ -181,13 +181,13 @@ var parseSeconds = function(time) { return seconds; } -var getSlaveSettings = function(data) { +var getSecondarySettings = function(data) { var settings = {}; - settings.masterUrl = data.slave.masterUrl; - settings.isPollingDisabled = data.slave.isPollingDisabled == 'true'; - settings.pollInterval = data.slave.pollInterval; - settings.isReplicating = data.slave.isReplicating == 'true'; - settings.nextExecutionAt = data.slave.nextExecutionAt; + settings.primaryUrl = data.secondary.primaryUrl; + settings.isPollingDisabled = data.secondary.isPollingDisabled == 'true'; + settings.pollInterval = data.secondary.pollInterval; + settings.isReplicating = data.secondary.isReplicating == 'true'; + settings.nextExecutionAt = data.secondary.nextExecutionAt; if(settings.isReplicating) { settings.isApprox = true; @@ -195,7 +195,7 
@@ var getSlaveSettings = function(data) { } else if (!settings.isPollingDisabled && settings.pollInterval) { if( settings.nextExecutionAt ) { settings.nextExecutionAtEpoch = parseDateToEpoch(settings.nextExecutionAt); - settings.currentTime = parseDateToEpoch(data.slave.currentDate); + settings.currentTime = parseDateToEpoch(data.secondary.currentDate); if( settings.nextExecutionAtEpoch > settings.currentTime) { settings.isApprox = false; @@ -206,15 +206,15 @@ var getSlaveSettings = function(data) { return settings; }; -var getMasterSettings = function(details, isSlave) { - var master = {}; - var masterData = isSlave ? details.slave.masterDetails.master : details.master; - master.replicationEnabled = masterData.replicationEnabled == "true"; - master.replicateAfter = masterData.replicateAfter.join(", "); +var getPrimarySettings = function(details, isSecondary) { + var primary = {}; + var primaryData = isSecondary ? details.secondary.primaryDetails.primary : details.primary; + primary.replicationEnabled = primaryData.replicationEnabled == "true"; + primary.replicateAfter = primaryData.replicateAfter.join(", "); - if (masterData.confFiles) { - master.files = []; - var confFiles = masterData.confFiles.split(','); + if (primaryData.confFiles) { + primary.files = []; + var confFiles = primaryData.confFiles.split(','); for (var i=0; i=0) { title = file.replace(':', ' ยป '); var parts = file.split(':'); - if (isSlave) { + if (isSecondary) { short = parts[1]; } else { short = parts[0]; } } - master.files.push({title:title, name:short}); + primary.files.push({title:title, name:short}); } } - return master; + return primary; } diff --git a/solr/webapp/web/partials/core_overview.html b/solr/webapp/web/partials/core_overview.html index f1826f65c096..29a64cfae782 100644 --- a/solr/webapp/web/partials/core_overview.html +++ b/solr/webapp/web/partials/core_overview.html @@ -99,8 +99,8 @@

Instance

Replication - (Slave) - (Master) + (Secondary) + (Primary)

@@ -126,45 +126,45 @@

- + - Master (Searching) + Primary (Searching)
{{replication.indexVersion}}
{{replication.generation}}
{{replication.indexSize || '-'}}
- + - Master (Replicable) -
{{replication.master.replicableVersion || '-'}}
-
{{replication.master.replicableGeneration || '-'}}
+ Primary (Replicable) +
{{replication.primary.replicableVersion || '-'}}
+
{{replication.primary.replicableGeneration || '-'}}
-
- + - Master (Replicable) -
{{replication.master.replicableVersion || '-'}}
-
{{replication.master.replicableGeneration || '-'}}
+ Primary (Replicable) +
{{replication.primary.replicableVersion || '-'}}
+
{{replication.primary.replicableGeneration || '-'}}
-
- + - Master (Searching) -
{{replication.slave.masterDetails.indexVersion}}
-
{{replication.slave.masterDetails.generation}}
-
{{replication.slave.masterDetails.indexSize || '-'}}
+ Primary (Searching) +
{{replication.secondary.primaryDetails.indexVersion}}
+
{{replication.secondary.primaryDetails.generation}}
+
{{replication.secondary.primaryDetails.indexSize || '-'}}
- + - Slave (Searching) + Secondary (Searching)
{{replication.indexVersion}}
{{replication.generation}}
{{replication.indexSize || '-'}}
diff --git a/solr/webapp/web/partials/replication.html b/solr/webapp/web/partials/replication.html index b3d668422a85..974ba50159bf 100644 --- a/solr/webapp/web/partials/replication.html +++ b/solr/webapp/web/partials/replication.html @@ -84,7 +84,7 @@ -
+
Iterations:
@@ -118,47 +118,47 @@ - + - Master (Searching) + Primary (Searching) -
{{versions.masterSearch.version}}
+
{{versions.primarySearch.version}}
-
{{versions.masterSearch.generation}}
+
{{versions.primarySearch.generation}}
-
{{versions.masterSearch.size}}
+
{{versions.primarySearch.size}}
- + - Master (Replicable) + Primary (Replicable) -
{{versions.master.version}}
+
{{versions.primary.version}}
-
{{versions.master.generation}}
+
{{versions.primary.generation}}
-
{{versions.master.size}}
+
{{versions.primary.size}}
- + - Slave (Searching) + Secondary (Searching) -
{{versions.slave.version}}
+
{{versions.secondary.version}}
-
{{versions.slave.generation}}
+
{{versions.secondary.generation}}
-
{{versions.slave.size}}
+
{{versions.secondary.size}}
@@ -169,14 +169,14 @@
-
+
Settings:
    -
  • +
  • -
    master url:
    -
    {{settings.masterUrl}}
    +
    primary url:
    +
    {{settings.primaryUrl}}
  • @@ -189,21 +189,21 @@
-
+
-
Settings (Master):
+
Settings (Primary):
  • replication enable:
    -
     
    +
     
  • replicateAfter:
    -
    {{master.replicateAfter}}
    +
    {{primary.replicateAfter}}
  • -
  • +
  • confFiles:
    -
    {{file.name}}{{ $last ? '' :', '}}
    +
    {{file.name}}{{ $last ? '' :', '}}
@@ -213,7 +213,7 @@